problem_id
stringlengths
18
22
source
stringclasses
1 value
task_type
stringclasses
1 value
in_source_id
stringlengths
13
58
prompt
stringlengths
1.1k
25.4k
golden_diff
stringlengths
145
5.13k
verification_info
stringlengths
582
39.1k
num_tokens
int64
271
4.1k
num_tokens_diff
int64
47
1.02k
gh_patches_debug_21824
rasdani/github-patches
git_diff
kartoza__prj.app-476
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Certificate URL needs to include domain See the url below - it is not useful since it does not specify a domain. <img width="1285" alt="screen shot 2017-07-24 at 10 52 15 pm" src="https://user-images.githubusercontent.com/178003/28544268-e2f26940-70c2-11e7-8d08-d4c39b466600.png"> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `django_project/certification/views/certificate.py` Content: ``` 1 # coding=utf-8 2 from django.http import Http404, HttpResponse 3 from django.views.generic import CreateView, DetailView 4 from django.core.urlresolvers import reverse 5 from braces.views import LoginRequiredMixin 6 from reportlab.pdfgen import canvas 7 from reportlab.lib.pagesizes import A4, landscape 8 from reportlab.lib.utils import ImageReader 9 from ..models import Certificate, Course, Attendee 10 from ..forms import CertificateForm 11 from base.models.project import Project 12 13 14 class CertificateMixin(object): 15 """Mixin class to provide standard settings for Certificate.""" 16 17 model = Certificate 18 form_class = CertificateForm 19 20 21 class CertificateCreateView( 22 LoginRequiredMixin, CertificateMixin, CreateView): 23 """Create view for Certificate.""" 24 25 context_object_name = 'certificate' 26 template_name = 'certificate/create.html' 27 28 def get_success_url(self): 29 """Define the redirect URL. 30 31 After successful creation of the object, the User will be redirected 32 to the Course detail page. 33 34 :returns: URL 35 :rtype: HttpResponse 36 """ 37 38 return reverse('course-detail', kwargs={ 39 'project_slug': self.project_slug, 40 'organisation_slug': self.organisation_slug, 41 'slug': self.course_slug 42 }) 43 44 def get_context_data(self, **kwargs): 45 """Get the context data which is passed to a template. 46 47 :param kwargs: Any arguments to pass to the superclass. 48 :type kwargs: dict 49 50 :returns: Context data which will be passed to the template. 51 :rtype: dict 52 """ 53 54 context = super( 55 CertificateCreateView, self).get_context_data(**kwargs) 56 context['course'] = Course.objects.get(slug=self.course_slug) 57 context['attendee'] = Attendee.objects.get(pk=self.pk) 58 return context 59 60 def get_form_kwargs(self): 61 """Get keyword arguments from form. 62 63 :returns keyword argument from the form 64 :rtype: dict 65 """ 66 67 kwargs = super(CertificateCreateView, self).get_form_kwargs() 68 self.project_slug = self.kwargs.get('project_slug', None) 69 self.organisation_slug = self.kwargs.get('organisation_slug', None) 70 self.course_slug = self.kwargs.get('course_slug', None) 71 self.pk = self.kwargs.get('pk', None) 72 self.course = Course.objects.get(slug=self.course_slug) 73 self.attendee = Attendee.objects.get(pk=self.pk) 74 kwargs.update({ 75 'user': self.request.user, 76 'course': self.course, 77 'attendee': self.attendee, 78 }) 79 return kwargs 80 81 82 class CertificateDetailView(DetailView): 83 """Detail view for Certificate.""" 84 85 model = Certificate 86 context_object_name = 'certificate' 87 template_name = 'certificate/detail.html' 88 89 def get_context_data(self, **kwargs): 90 """Get the context data which is passed to a template. 91 92 :param kwargs: Any arguments to pass to the superclass. 93 :type kwargs: dict 94 95 :returns: Context data which will be passed to the template. 96 :rtype: dict 97 """ 98 99 self.certificateID = self.kwargs.get('id', None) 100 self.project_slug = self.kwargs.get('project_slug', None) 101 context = super( 102 CertificateDetailView, self).get_context_data(**kwargs) 103 issued_id = \ 104 Certificate.objects.all().values_list('certificateID', flat=True) 105 if self.certificateID in issued_id: 106 context['certificate'] = \ 107 Certificate.objects.get(certificateID=self.certificateID) 108 context['project_slug'] = self.project_slug 109 return context 110 111 def get_queryset(self): 112 """Get the queryset for this view. 113 114 :returns: Queryset which is all certificate in the 115 corresponding organisation. 116 :rtype: QuerySet 117 """ 118 119 qs = Certificate.objects.all() 120 return qs 121 122 def get_object(self, queryset=None): 123 """Get the object for this view. 124 125 :param queryset: A query set 126 :type queryset: QuerySet 127 128 :returns: Queryset which is filtered to only show a certificate 129 depends on the input certificate ID. 130 :rtype: QuerySet 131 :raises: Http404 132 """ 133 134 if queryset is None: 135 queryset = self.get_queryset() 136 certificateID = self.kwargs.get('id', None) 137 if certificateID: 138 try: 139 obj = queryset.get(certificateID=certificateID) 140 return obj 141 except Certificate.DoesNotExist: 142 return None 143 else: 144 raise Http404('Sorry! Certificate by this ID is not exist.') 145 146 147 def certificate_pdf_view(request, **kwargs): 148 149 project_slug = kwargs.pop('project_slug') 150 course_slug = kwargs.pop('course_slug') 151 pk = kwargs.pop('pk') 152 project = Project.objects.get(slug=project_slug) 153 course = Course.objects.get(slug=course_slug) 154 attendee = Attendee.objects.get(pk=pk) 155 certificate = Certificate.objects.get(course=course, attendee=attendee) 156 157 # Create the HttpResponse object with the appropriate PDF headers. 158 response = HttpResponse(content_type='application/pdf') 159 response['Content-Disposition'] = 'filename="certificate.pdf"' 160 161 # Create the PDF object, using the response object as its "file." 162 page = canvas.Canvas(response, pagesize=landscape(A4)) 163 width, height = A4 164 center = height * 0.5 165 166 if project.image_file: 167 project_logo = ImageReader(project.image_file) 168 else: 169 project_logo = None 170 171 if course.certifying_organisation.logo: 172 organisation_logo = ImageReader(course.certifying_organisation.logo) 173 else: 174 organisation_logo = None 175 176 if project.signature: 177 project_owner_signature = ImageReader(project.signature) 178 else: 179 project_owner_signature = None 180 181 if course.course_convener.signature: 182 convener_signature = ImageReader(course.course_convener.signature) 183 else: 184 convener_signature = None 185 186 if course.template_certificate: 187 background = ImageReader(course.template_certificate) 188 else: 189 background = None 190 191 # Certificate margin. 192 margin_right = height - 50 193 margin_left = 50 194 margin_bottom = 50 195 max_left = margin_right - 50 196 197 # Draw things on the PDF. Here's where the PDF generation happens. 198 # See the ReportLab documentation for the full list of functionality. 199 if background is not None: 200 page.drawImage( 201 background, 0, 0, height=width, width=height, 202 preserveAspectRatio=True, mask='auto') 203 page.setFillColorRGB(0.1, 0.1, 0.1) 204 page.setFont('Times-Roman', 18) 205 # page.drawString(margin_left, 480, project.name) 206 # page.drawRightString( 207 # (margin_right), 480, course.certifying_organisation.name) 208 209 if project_logo is not None: 210 page.drawImage( 211 project_logo, 50, 500, width=50, height=50, 212 preserveAspectRatio=True, mask='auto') 213 214 if organisation_logo is not None: 215 page.drawImage( 216 organisation_logo, max_left, 500, height=50, width=50, 217 preserveAspectRatio=True, anchor='c', mask='auto') 218 219 page.setFont('Times-Bold', 26) 220 page.drawCentredString(center, 480, 'Certificate of Completion') 221 page.drawCentredString( 222 center, 400, '%s %s' % (attendee.firstname, attendee.surname)) 223 page.setFont('Times-Roman', 16) 224 page.drawCentredString( 225 center, 360, 'Has attended and completed the course:') 226 page.setFont('Times-Bold', 20) 227 page.drawCentredString(center, 300, course.course_type.name) 228 page.setFont('Times-Roman', 16) 229 page.drawCentredString( 230 center, 270, 231 'From %s %s %s to %s %s %s' 232 % (course.start_date.day, course.start_date.strftime('%B'), 233 course.start_date.year, course.end_date.day, 234 course.end_date.strftime('%B'), course.end_date.year)) 235 page.setFillColorRGB(0.1, 0.1, 0.1) 236 page.drawCentredString( 237 center, 220, 'Convened by %s %s at %s' % ( 238 course.course_convener.user.first_name, 239 course.course_convener.user.last_name, course.training_center)) 240 241 if project_owner_signature is not None: 242 page.drawImage( 243 project_owner_signature, 244 (margin_left + 100), (margin_bottom + 70), width=100, height=70, 245 preserveAspectRatio=True, anchor='s', mask='auto') 246 247 if convener_signature is not None: 248 page.drawImage( 249 convener_signature, (margin_right - 200), (margin_bottom + 70), 250 width=100, height=70, preserveAspectRatio=True, anchor='s', 251 mask='auto') 252 253 page.setFont('Times-Italic', 12) 254 page.drawCentredString( 255 (margin_left + 150), (margin_bottom + 60), 256 '%s %s' % (project.owner.first_name, project.owner.last_name)) 257 page.drawCentredString( 258 (margin_right - 150), (margin_bottom + 60), 259 '%s %s' % ( 260 course.course_convener.user.first_name, 261 course.course_convener.user.last_name)) 262 page.line( 263 (margin_left + 70), (margin_bottom + 55), 264 (margin_left + 230), (margin_bottom + 55)) 265 page.line( 266 (margin_right - 70), (margin_bottom + 55), 267 (margin_right - 230), (margin_bottom + 55)) 268 page.setFont('Times-Roman', 13) 269 page.drawCentredString( 270 (margin_left + 150), (margin_bottom + 40), 'Project Owner') 271 page.drawCentredString( 272 (margin_right - 150), (margin_bottom + 40), 'Convener') 273 274 # Footnotes. 275 page.setFont('Times-Roman', 14) 276 page.drawString( 277 margin_left, margin_bottom - 10, 'ID: %s' % certificate.certificateID) 278 page.setFont('Times-Roman', 8) 279 page.drawString( 280 margin_left, (margin_bottom - 20), 281 'You can verify this certificate by visiting /%s/certificate/%s/.' 282 % (project.slug, certificate.certificateID)) 283 284 # Close the PDF object cleanly. 285 page.showPage() 286 page.save() 287 return response 288 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/django_project/certification/views/certificate.py b/django_project/certification/views/certificate.py --- a/django_project/certification/views/certificate.py +++ b/django_project/certification/views/certificate.py @@ -153,6 +153,7 @@ course = Course.objects.get(slug=course_slug) attendee = Attendee.objects.get(pk=pk) certificate = Certificate.objects.get(course=course, attendee=attendee) + current_site = request.META['HTTP_HOST'] # Create the HttpResponse object with the appropriate PDF headers. response = HttpResponse(content_type='application/pdf') @@ -278,8 +279,9 @@ page.setFont('Times-Roman', 8) page.drawString( margin_left, (margin_bottom - 20), - 'You can verify this certificate by visiting /%s/certificate/%s/.' - % (project.slug, certificate.certificateID)) + 'You can verify this certificate by visiting ' + 'http://%s/en/%s/certificate/%s/.' + % (current_site, project.slug, certificate.certificateID)) # Close the PDF object cleanly. page.showPage()
{"golden_diff": "diff --git a/django_project/certification/views/certificate.py b/django_project/certification/views/certificate.py\n--- a/django_project/certification/views/certificate.py\n+++ b/django_project/certification/views/certificate.py\n@@ -153,6 +153,7 @@\n course = Course.objects.get(slug=course_slug)\n attendee = Attendee.objects.get(pk=pk)\n certificate = Certificate.objects.get(course=course, attendee=attendee)\n+ current_site = request.META['HTTP_HOST']\n \n # Create the HttpResponse object with the appropriate PDF headers.\n response = HttpResponse(content_type='application/pdf')\n@@ -278,8 +279,9 @@\n page.setFont('Times-Roman', 8)\n page.drawString(\n margin_left, (margin_bottom - 20),\n- 'You can verify this certificate by visiting /%s/certificate/%s/.'\n- % (project.slug, certificate.certificateID))\n+ 'You can verify this certificate by visiting '\n+ 'http://%s/en/%s/certificate/%s/.'\n+ % (current_site, project.slug, certificate.certificateID))\n \n # Close the PDF object cleanly.\n page.showPage()\n", "issue": "Certificate URL needs to include domain\nSee the url below - it is not useful since it does not specify a domain.\r\n\r\n<img width=\"1285\" alt=\"screen shot 2017-07-24 at 10 52 15 pm\" src=\"https://user-images.githubusercontent.com/178003/28544268-e2f26940-70c2-11e7-8d08-d4c39b466600.png\">\r\n\n", "before_files": [{"content": "# coding=utf-8\nfrom django.http import Http404, HttpResponse\nfrom django.views.generic import CreateView, DetailView\nfrom django.core.urlresolvers import reverse\nfrom braces.views import LoginRequiredMixin\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.pagesizes import A4, landscape\nfrom reportlab.lib.utils import ImageReader\nfrom ..models import Certificate, Course, Attendee\nfrom ..forms import CertificateForm\nfrom base.models.project import Project\n\n\nclass CertificateMixin(object):\n \"\"\"Mixin class to provide standard settings for Certificate.\"\"\"\n\n model = Certificate\n form_class = CertificateForm\n\n\nclass CertificateCreateView(\n LoginRequiredMixin, CertificateMixin, CreateView):\n \"\"\"Create view for Certificate.\"\"\"\n\n context_object_name = 'certificate'\n template_name = 'certificate/create.html'\n\n def get_success_url(self):\n \"\"\"Define the redirect URL.\n\n After successful creation of the object, the User will be redirected\n to the Course detail page.\n\n :returns: URL\n :rtype: HttpResponse\n \"\"\"\n\n return reverse('course-detail', kwargs={\n 'project_slug': self.project_slug,\n 'organisation_slug': self.organisation_slug,\n 'slug': self.course_slug\n })\n\n def get_context_data(self, **kwargs):\n \"\"\"Get the context data which is passed to a template.\n\n :param kwargs: Any arguments to pass to the superclass.\n :type kwargs: dict\n\n :returns: Context data which will be passed to the template.\n :rtype: dict\n \"\"\"\n\n context = super(\n CertificateCreateView, self).get_context_data(**kwargs)\n context['course'] = Course.objects.get(slug=self.course_slug)\n context['attendee'] = Attendee.objects.get(pk=self.pk)\n return context\n\n def get_form_kwargs(self):\n \"\"\"Get keyword arguments from form.\n\n :returns keyword argument from the form\n :rtype: dict\n \"\"\"\n\n kwargs = super(CertificateCreateView, self).get_form_kwargs()\n self.project_slug = self.kwargs.get('project_slug', None)\n self.organisation_slug = self.kwargs.get('organisation_slug', None)\n self.course_slug = self.kwargs.get('course_slug', None)\n self.pk = self.kwargs.get('pk', None)\n self.course = Course.objects.get(slug=self.course_slug)\n self.attendee = Attendee.objects.get(pk=self.pk)\n kwargs.update({\n 'user': self.request.user,\n 'course': self.course,\n 'attendee': self.attendee,\n })\n return kwargs\n\n\nclass CertificateDetailView(DetailView):\n \"\"\"Detail view for Certificate.\"\"\"\n\n model = Certificate\n context_object_name = 'certificate'\n template_name = 'certificate/detail.html'\n\n def get_context_data(self, **kwargs):\n \"\"\"Get the context data which is passed to a template.\n\n :param kwargs: Any arguments to pass to the superclass.\n :type kwargs: dict\n\n :returns: Context data which will be passed to the template.\n :rtype: dict\n \"\"\"\n\n self.certificateID = self.kwargs.get('id', None)\n self.project_slug = self.kwargs.get('project_slug', None)\n context = super(\n CertificateDetailView, self).get_context_data(**kwargs)\n issued_id = \\\n Certificate.objects.all().values_list('certificateID', flat=True)\n if self.certificateID in issued_id:\n context['certificate'] = \\\n Certificate.objects.get(certificateID=self.certificateID)\n context['project_slug'] = self.project_slug\n return context\n\n def get_queryset(self):\n \"\"\"Get the queryset for this view.\n\n :returns: Queryset which is all certificate in the\n corresponding organisation.\n :rtype: QuerySet\n \"\"\"\n\n qs = Certificate.objects.all()\n return qs\n\n def get_object(self, queryset=None):\n \"\"\"Get the object for this view.\n\n :param queryset: A query set\n :type queryset: QuerySet\n\n :returns: Queryset which is filtered to only show a certificate\n depends on the input certificate ID.\n :rtype: QuerySet\n :raises: Http404\n \"\"\"\n\n if queryset is None:\n queryset = self.get_queryset()\n certificateID = self.kwargs.get('id', None)\n if certificateID:\n try:\n obj = queryset.get(certificateID=certificateID)\n return obj\n except Certificate.DoesNotExist:\n return None\n else:\n raise Http404('Sorry! Certificate by this ID is not exist.')\n\n\ndef certificate_pdf_view(request, **kwargs):\n\n project_slug = kwargs.pop('project_slug')\n course_slug = kwargs.pop('course_slug')\n pk = kwargs.pop('pk')\n project = Project.objects.get(slug=project_slug)\n course = Course.objects.get(slug=course_slug)\n attendee = Attendee.objects.get(pk=pk)\n certificate = Certificate.objects.get(course=course, attendee=attendee)\n\n # Create the HttpResponse object with the appropriate PDF headers.\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'filename=\"certificate.pdf\"'\n\n # Create the PDF object, using the response object as its \"file.\"\n page = canvas.Canvas(response, pagesize=landscape(A4))\n width, height = A4\n center = height * 0.5\n\n if project.image_file:\n project_logo = ImageReader(project.image_file)\n else:\n project_logo = None\n\n if course.certifying_organisation.logo:\n organisation_logo = ImageReader(course.certifying_organisation.logo)\n else:\n organisation_logo = None\n\n if project.signature:\n project_owner_signature = ImageReader(project.signature)\n else:\n project_owner_signature = None\n\n if course.course_convener.signature:\n convener_signature = ImageReader(course.course_convener.signature)\n else:\n convener_signature = None\n\n if course.template_certificate:\n background = ImageReader(course.template_certificate)\n else:\n background = None\n\n # Certificate margin.\n margin_right = height - 50\n margin_left = 50\n margin_bottom = 50\n max_left = margin_right - 50\n\n # Draw things on the PDF. Here's where the PDF generation happens.\n # See the ReportLab documentation for the full list of functionality.\n if background is not None:\n page.drawImage(\n background, 0, 0, height=width, width=height,\n preserveAspectRatio=True, mask='auto')\n page.setFillColorRGB(0.1, 0.1, 0.1)\n page.setFont('Times-Roman', 18)\n # page.drawString(margin_left, 480, project.name)\n # page.drawRightString(\n # (margin_right), 480, course.certifying_organisation.name)\n\n if project_logo is not None:\n page.drawImage(\n project_logo, 50, 500, width=50, height=50,\n preserveAspectRatio=True, mask='auto')\n\n if organisation_logo is not None:\n page.drawImage(\n organisation_logo, max_left, 500, height=50, width=50,\n preserveAspectRatio=True, anchor='c', mask='auto')\n\n page.setFont('Times-Bold', 26)\n page.drawCentredString(center, 480, 'Certificate of Completion')\n page.drawCentredString(\n center, 400, '%s %s' % (attendee.firstname, attendee.surname))\n page.setFont('Times-Roman', 16)\n page.drawCentredString(\n center, 360, 'Has attended and completed the course:')\n page.setFont('Times-Bold', 20)\n page.drawCentredString(center, 300, course.course_type.name)\n page.setFont('Times-Roman', 16)\n page.drawCentredString(\n center, 270,\n 'From %s %s %s to %s %s %s'\n % (course.start_date.day, course.start_date.strftime('%B'),\n course.start_date.year, course.end_date.day,\n course.end_date.strftime('%B'), course.end_date.year))\n page.setFillColorRGB(0.1, 0.1, 0.1)\n page.drawCentredString(\n center, 220, 'Convened by %s %s at %s' % (\n course.course_convener.user.first_name,\n course.course_convener.user.last_name, course.training_center))\n\n if project_owner_signature is not None:\n page.drawImage(\n project_owner_signature,\n (margin_left + 100), (margin_bottom + 70), width=100, height=70,\n preserveAspectRatio=True, anchor='s', mask='auto')\n\n if convener_signature is not None:\n page.drawImage(\n convener_signature, (margin_right - 200), (margin_bottom + 70),\n width=100, height=70, preserveAspectRatio=True, anchor='s',\n mask='auto')\n\n page.setFont('Times-Italic', 12)\n page.drawCentredString(\n (margin_left + 150), (margin_bottom + 60),\n '%s %s' % (project.owner.first_name, project.owner.last_name))\n page.drawCentredString(\n (margin_right - 150), (margin_bottom + 60),\n '%s %s' % (\n course.course_convener.user.first_name,\n course.course_convener.user.last_name))\n page.line(\n (margin_left + 70), (margin_bottom + 55),\n (margin_left + 230), (margin_bottom + 55))\n page.line(\n (margin_right - 70), (margin_bottom + 55),\n (margin_right - 230), (margin_bottom + 55))\n page.setFont('Times-Roman', 13)\n page.drawCentredString(\n (margin_left + 150), (margin_bottom + 40), 'Project Owner')\n page.drawCentredString(\n (margin_right - 150), (margin_bottom + 40), 'Convener')\n\n # Footnotes.\n page.setFont('Times-Roman', 14)\n page.drawString(\n margin_left, margin_bottom - 10, 'ID: %s' % certificate.certificateID)\n page.setFont('Times-Roman', 8)\n page.drawString(\n margin_left, (margin_bottom - 20),\n 'You can verify this certificate by visiting /%s/certificate/%s/.'\n % (project.slug, certificate.certificateID))\n\n # Close the PDF object cleanly.\n page.showPage()\n page.save()\n return response\n", "path": "django_project/certification/views/certificate.py"}], "after_files": [{"content": "# coding=utf-8\nfrom django.http import Http404, HttpResponse\nfrom django.views.generic import CreateView, DetailView\nfrom django.core.urlresolvers import reverse\nfrom braces.views import LoginRequiredMixin\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.pagesizes import A4, landscape\nfrom reportlab.lib.utils import ImageReader\nfrom ..models import Certificate, Course, Attendee\nfrom ..forms import CertificateForm\nfrom base.models.project import Project\n\n\nclass CertificateMixin(object):\n \"\"\"Mixin class to provide standard settings for Certificate.\"\"\"\n\n model = Certificate\n form_class = CertificateForm\n\n\nclass CertificateCreateView(\n LoginRequiredMixin, CertificateMixin, CreateView):\n \"\"\"Create view for Certificate.\"\"\"\n\n context_object_name = 'certificate'\n template_name = 'certificate/create.html'\n\n def get_success_url(self):\n \"\"\"Define the redirect URL.\n\n After successful creation of the object, the User will be redirected\n to the Course detail page.\n\n :returns: URL\n :rtype: HttpResponse\n \"\"\"\n\n return reverse('course-detail', kwargs={\n 'project_slug': self.project_slug,\n 'organisation_slug': self.organisation_slug,\n 'slug': self.course_slug\n })\n\n def get_context_data(self, **kwargs):\n \"\"\"Get the context data which is passed to a template.\n\n :param kwargs: Any arguments to pass to the superclass.\n :type kwargs: dict\n\n :returns: Context data which will be passed to the template.\n :rtype: dict\n \"\"\"\n\n context = super(\n CertificateCreateView, self).get_context_data(**kwargs)\n context['course'] = Course.objects.get(slug=self.course_slug)\n context['attendee'] = Attendee.objects.get(pk=self.pk)\n return context\n\n def get_form_kwargs(self):\n \"\"\"Get keyword arguments from form.\n\n :returns keyword argument from the form\n :rtype: dict\n \"\"\"\n\n kwargs = super(CertificateCreateView, self).get_form_kwargs()\n self.project_slug = self.kwargs.get('project_slug', None)\n self.organisation_slug = self.kwargs.get('organisation_slug', None)\n self.course_slug = self.kwargs.get('course_slug', None)\n self.pk = self.kwargs.get('pk', None)\n self.course = Course.objects.get(slug=self.course_slug)\n self.attendee = Attendee.objects.get(pk=self.pk)\n kwargs.update({\n 'user': self.request.user,\n 'course': self.course,\n 'attendee': self.attendee,\n })\n return kwargs\n\n\nclass CertificateDetailView(DetailView):\n \"\"\"Detail view for Certificate.\"\"\"\n\n model = Certificate\n context_object_name = 'certificate'\n template_name = 'certificate/detail.html'\n\n def get_context_data(self, **kwargs):\n \"\"\"Get the context data which is passed to a template.\n\n :param kwargs: Any arguments to pass to the superclass.\n :type kwargs: dict\n\n :returns: Context data which will be passed to the template.\n :rtype: dict\n \"\"\"\n\n self.certificateID = self.kwargs.get('id', None)\n self.project_slug = self.kwargs.get('project_slug', None)\n context = super(\n CertificateDetailView, self).get_context_data(**kwargs)\n issued_id = \\\n Certificate.objects.all().values_list('certificateID', flat=True)\n if self.certificateID in issued_id:\n context['certificate'] = \\\n Certificate.objects.get(certificateID=self.certificateID)\n context['project_slug'] = self.project_slug\n return context\n\n def get_queryset(self):\n \"\"\"Get the queryset for this view.\n\n :returns: Queryset which is all certificate in the\n corresponding organisation.\n :rtype: QuerySet\n \"\"\"\n\n qs = Certificate.objects.all()\n return qs\n\n def get_object(self, queryset=None):\n \"\"\"Get the object for this view.\n\n :param queryset: A query set\n :type queryset: QuerySet\n\n :returns: Queryset which is filtered to only show a certificate\n depends on the input certificate ID.\n :rtype: QuerySet\n :raises: Http404\n \"\"\"\n\n if queryset is None:\n queryset = self.get_queryset()\n certificateID = self.kwargs.get('id', None)\n if certificateID:\n try:\n obj = queryset.get(certificateID=certificateID)\n return obj\n except Certificate.DoesNotExist:\n return None\n else:\n raise Http404('Sorry! Certificate by this ID is not exist.')\n\n\ndef certificate_pdf_view(request, **kwargs):\n\n project_slug = kwargs.pop('project_slug')\n course_slug = kwargs.pop('course_slug')\n pk = kwargs.pop('pk')\n project = Project.objects.get(slug=project_slug)\n course = Course.objects.get(slug=course_slug)\n attendee = Attendee.objects.get(pk=pk)\n certificate = Certificate.objects.get(course=course, attendee=attendee)\n current_site = request.META['HTTP_HOST']\n\n # Create the HttpResponse object with the appropriate PDF headers.\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'filename=\"certificate.pdf\"'\n\n # Create the PDF object, using the response object as its \"file.\"\n page = canvas.Canvas(response, pagesize=landscape(A4))\n width, height = A4\n center = height * 0.5\n\n if project.image_file:\n project_logo = ImageReader(project.image_file)\n else:\n project_logo = None\n\n if course.certifying_organisation.logo:\n organisation_logo = ImageReader(course.certifying_organisation.logo)\n else:\n organisation_logo = None\n\n if project.signature:\n project_owner_signature = ImageReader(project.signature)\n else:\n project_owner_signature = None\n\n if course.course_convener.signature:\n convener_signature = ImageReader(course.course_convener.signature)\n else:\n convener_signature = None\n\n if course.template_certificate:\n background = ImageReader(course.template_certificate)\n else:\n background = None\n\n # Certificate margin.\n margin_right = height - 50\n margin_left = 50\n margin_bottom = 50\n max_left = margin_right - 50\n\n # Draw things on the PDF. Here's where the PDF generation happens.\n # See the ReportLab documentation for the full list of functionality.\n if background is not None:\n page.drawImage(\n background, 0, 0, height=width, width=height,\n preserveAspectRatio=True, mask='auto')\n page.setFillColorRGB(0.1, 0.1, 0.1)\n page.setFont('Times-Roman', 18)\n # page.drawString(margin_left, 480, project.name)\n # page.drawRightString(\n # (margin_right), 480, course.certifying_organisation.name)\n\n if project_logo is not None:\n page.drawImage(\n project_logo, 50, 500, width=50, height=50,\n preserveAspectRatio=True, mask='auto')\n\n if organisation_logo is not None:\n page.drawImage(\n organisation_logo, max_left, 500, height=50, width=50,\n preserveAspectRatio=True, anchor='c', mask='auto')\n\n page.setFont('Times-Bold', 26)\n page.drawCentredString(center, 480, 'Certificate of Completion')\n page.drawCentredString(\n center, 400, '%s %s' % (attendee.firstname, attendee.surname))\n page.setFont('Times-Roman', 16)\n page.drawCentredString(\n center, 360, 'Has attended and completed the course:')\n page.setFont('Times-Bold', 20)\n page.drawCentredString(center, 300, course.course_type.name)\n page.setFont('Times-Roman', 16)\n page.drawCentredString(\n center, 270,\n 'From %s %s %s to %s %s %s'\n % (course.start_date.day, course.start_date.strftime('%B'),\n course.start_date.year, course.end_date.day,\n course.end_date.strftime('%B'), course.end_date.year))\n page.setFillColorRGB(0.1, 0.1, 0.1)\n page.drawCentredString(\n center, 220, 'Convened by %s %s at %s' % (\n course.course_convener.user.first_name,\n course.course_convener.user.last_name, course.training_center))\n\n if project_owner_signature is not None:\n page.drawImage(\n project_owner_signature,\n (margin_left + 100), (margin_bottom + 70), width=100, height=70,\n preserveAspectRatio=True, anchor='s', mask='auto')\n\n if convener_signature is not None:\n page.drawImage(\n convener_signature, (margin_right - 200), (margin_bottom + 70),\n width=100, height=70, preserveAspectRatio=True, anchor='s',\n mask='auto')\n\n page.setFont('Times-Italic', 12)\n page.drawCentredString(\n (margin_left + 150), (margin_bottom + 60),\n '%s %s' % (project.owner.first_name, project.owner.last_name))\n page.drawCentredString(\n (margin_right - 150), (margin_bottom + 60),\n '%s %s' % (\n course.course_convener.user.first_name,\n course.course_convener.user.last_name))\n page.line(\n (margin_left + 70), (margin_bottom + 55),\n (margin_left + 230), (margin_bottom + 55))\n page.line(\n (margin_right - 70), (margin_bottom + 55),\n (margin_right - 230), (margin_bottom + 55))\n page.setFont('Times-Roman', 13)\n page.drawCentredString(\n (margin_left + 150), (margin_bottom + 40), 'Project Owner')\n page.drawCentredString(\n (margin_right - 150), (margin_bottom + 40), 'Convener')\n\n # Footnotes.\n page.setFont('Times-Roman', 14)\n page.drawString(\n margin_left, margin_bottom - 10, 'ID: %s' % certificate.certificateID)\n page.setFont('Times-Roman', 8)\n page.drawString(\n margin_left, (margin_bottom - 20),\n 'You can verify this certificate by visiting '\n 'http://%s/en/%s/certificate/%s/.'\n % (current_site, project.slug, certificate.certificateID))\n\n # Close the PDF object cleanly.\n page.showPage()\n page.save()\n return response\n", "path": "django_project/certification/views/certificate.py"}]}
3,486
264
gh_patches_debug_34378
rasdani/github-patches
git_diff
nipy__nipype-3637
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- CSVReader does not handle quoted fields or tsv files ### Summary I want to add functionality to the CSVReader so it can handle tsv files. While checking that out, I noticed that we're just splitting on `,`, which is not ideal. ### Actual behavior Parsing CSV files with fields that contain quoted text with ',' characters does not work correctly. TSV files are not supported. ### Expected behavior Quoted fields should be correctly parsed if they contain the delimiter. ### How to replicate the behavior Run the script below. ### Script/Workflow details ``` from nipype.interfaces import utility reader = utility.CSVReader() lines = ["foo,\"hello,world\",300.1\n"] with open("testcsv.csv", "w") as fid: fid.writelines(lines) fid.flush() reader.inputs.in_file = "testcsv.csv" out = reader.run() assert out.outputs.column_1 == ["hello, world"] ``` ### Platform details: <!-- Please run the following code from your shell and place the output between the triple ticks, below. python -c "import nipype; from pprint import pprint; pprint(nipype.get_info())" --> ``` {'commit_hash': 'c46a957bd', 'commit_source': 'repository', 'networkx_version': '3.2.1', 'nibabel_version': '5.2.1', 'nipype_version': '1.8.7.dev0', 'numpy_version': '1.26.4', 'pkg_path': '/home/USER/Desktop/nipype/nipype', 'scipy_version': '1.12.0', 'sys_executable': '/home/USER/Desktop/nipype/.venv/bin/python', 'sys_platform': 'linux', 'sys_version': '3.9.18 (main, Feb 1 2024, 11:48:51) \n[GCC 11.4.0]', 'traits_version': '6.3.2'} ``` ### Execution environment - My python environment outside container --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nipype/interfaces/utility/csv.py` Content: ``` 1 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- 2 # vi: set ft=python sts=4 ts=4 sw=4 et: 3 """CSV Handling utilities 4 """ 5 from ..base import traits, TraitedSpec, DynamicTraitedSpec, File, BaseInterface 6 from ..io import add_traits 7 8 9 class CSVReaderInputSpec(DynamicTraitedSpec, TraitedSpec): 10 in_file = File( 11 exists=True, mandatory=True, desc="Input comma-seperated value (CSV) file" 12 ) 13 header = traits.Bool( 14 False, usedefault=True, desc="True if the first line is a column header" 15 ) 16 17 18 class CSVReader(BaseInterface): 19 """ 20 Examples 21 -------- 22 23 >>> reader = CSVReader() # doctest: +SKIP 24 >>> reader.inputs.in_file = 'noHeader.csv' # doctest: +SKIP 25 >>> out = reader.run() # doctest: +SKIP 26 >>> out.outputs.column_0 == ['foo', 'bar', 'baz'] # doctest: +SKIP 27 True 28 >>> out.outputs.column_1 == ['hello', 'world', 'goodbye'] # doctest: +SKIP 29 True 30 >>> out.outputs.column_2 == ['300.1', '5', '0.3'] # doctest: +SKIP 31 True 32 33 >>> reader = CSVReader() # doctest: +SKIP 34 >>> reader.inputs.in_file = 'header.csv' # doctest: +SKIP 35 >>> reader.inputs.header = True # doctest: +SKIP 36 >>> out = reader.run() # doctest: +SKIP 37 >>> out.outputs.files == ['foo', 'bar', 'baz'] # doctest: +SKIP 38 True 39 >>> out.outputs.labels == ['hello', 'world', 'goodbye'] # doctest: +SKIP 40 True 41 >>> out.outputs.erosion == ['300.1', '5', '0.3'] # doctest: +SKIP 42 True 43 44 """ 45 46 input_spec = CSVReaderInputSpec 47 output_spec = DynamicTraitedSpec 48 _always_run = True 49 50 def _append_entry(self, outputs, entry): 51 for key, value in zip(self._outfields, entry): 52 outputs[key].append(value) 53 return outputs 54 55 def _parse_line(self, line): 56 line = line.replace("\n", "") 57 entry = [x.strip() for x in line.split(",")] 58 return entry 59 60 def _get_outfields(self): 61 with open(self.inputs.in_file) as fid: 62 entry = self._parse_line(fid.readline()) 63 if self.inputs.header: 64 self._outfields = tuple(entry) 65 else: 66 self._outfields = tuple(["column_" + str(x) for x in range(len(entry))]) 67 return self._outfields 68 69 def _run_interface(self, runtime): 70 self._get_outfields() 71 return runtime 72 73 def _outputs(self): 74 return self._add_output_traits(super()._outputs()) 75 76 def _add_output_traits(self, base): 77 return add_traits(base, self._get_outfields()) 78 79 def _list_outputs(self): 80 outputs = self.output_spec().get() 81 isHeader = True 82 for key in self._outfields: 83 outputs[key] = [] # initialize outfields 84 with open(self.inputs.in_file) as fid: 85 for line in fid.readlines(): 86 if self.inputs.header and isHeader: # skip header line 87 isHeader = False 88 continue 89 entry = self._parse_line(line) 90 outputs = self._append_entry(outputs, entry) 91 return outputs 92 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nipype/interfaces/utility/csv.py b/nipype/interfaces/utility/csv.py --- a/nipype/interfaces/utility/csv.py +++ b/nipype/interfaces/utility/csv.py @@ -2,6 +2,7 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """CSV Handling utilities """ +import csv from ..base import traits, TraitedSpec, DynamicTraitedSpec, File, BaseInterface from ..io import add_traits @@ -13,6 +14,7 @@ header = traits.Bool( False, usedefault=True, desc="True if the first line is a column header" ) + delimiter = traits.String(",", usedefault=True, desc="Delimiter to use.") class CSVReader(BaseInterface): @@ -52,14 +54,11 @@ outputs[key].append(value) return outputs - def _parse_line(self, line): - line = line.replace("\n", "") - entry = [x.strip() for x in line.split(",")] - return entry - def _get_outfields(self): with open(self.inputs.in_file) as fid: - entry = self._parse_line(fid.readline()) + reader = csv.reader(fid, delimiter=self.inputs.delimiter) + + entry = next(reader) if self.inputs.header: self._outfields = tuple(entry) else: @@ -82,10 +81,10 @@ for key in self._outfields: outputs[key] = [] # initialize outfields with open(self.inputs.in_file) as fid: - for line in fid.readlines(): + reader = csv.reader(fid, delimiter=self.inputs.delimiter) + for entry in reader: if self.inputs.header and isHeader: # skip header line isHeader = False continue - entry = self._parse_line(line) outputs = self._append_entry(outputs, entry) return outputs
{"golden_diff": "diff --git a/nipype/interfaces/utility/csv.py b/nipype/interfaces/utility/csv.py\n--- a/nipype/interfaces/utility/csv.py\n+++ b/nipype/interfaces/utility/csv.py\n@@ -2,6 +2,7 @@\n # vi: set ft=python sts=4 ts=4 sw=4 et:\n \"\"\"CSV Handling utilities\n \"\"\"\n+import csv\n from ..base import traits, TraitedSpec, DynamicTraitedSpec, File, BaseInterface\n from ..io import add_traits\n \n@@ -13,6 +14,7 @@\n header = traits.Bool(\n False, usedefault=True, desc=\"True if the first line is a column header\"\n )\n+ delimiter = traits.String(\",\", usedefault=True, desc=\"Delimiter to use.\")\n \n \n class CSVReader(BaseInterface):\n@@ -52,14 +54,11 @@\n outputs[key].append(value)\n return outputs\n \n- def _parse_line(self, line):\n- line = line.replace(\"\\n\", \"\")\n- entry = [x.strip() for x in line.split(\",\")]\n- return entry\n-\n def _get_outfields(self):\n with open(self.inputs.in_file) as fid:\n- entry = self._parse_line(fid.readline())\n+ reader = csv.reader(fid, delimiter=self.inputs.delimiter)\n+\n+ entry = next(reader)\n if self.inputs.header:\n self._outfields = tuple(entry)\n else:\n@@ -82,10 +81,10 @@\n for key in self._outfields:\n outputs[key] = [] # initialize outfields\n with open(self.inputs.in_file) as fid:\n- for line in fid.readlines():\n+ reader = csv.reader(fid, delimiter=self.inputs.delimiter)\n+ for entry in reader:\n if self.inputs.header and isHeader: # skip header line\n isHeader = False\n continue\n- entry = self._parse_line(line)\n outputs = self._append_entry(outputs, entry)\n return outputs\n", "issue": "CSVReader does not handle quoted fields or tsv files\n### Summary\r\nI want to add functionality to the CSVReader so it can handle tsv files. While checking that out, I noticed that we're just splitting on `,`, which is not ideal.\r\n\r\n### Actual behavior\r\nParsing CSV files with fields that contain quoted text with ',' characters does not work correctly. TSV files are not supported.\r\n\r\n\r\n### Expected behavior\r\nQuoted fields should be correctly parsed if they contain the delimiter.\r\n\r\n### How to replicate the behavior\r\nRun the script below.\r\n\r\n### Script/Workflow details\r\n\r\n```\r\nfrom nipype.interfaces import utility\r\n\r\nreader = utility.CSVReader()\r\n\r\nlines = [\"foo,\\\"hello,world\\\",300.1\\n\"]\r\n\r\nwith open(\"testcsv.csv\", \"w\") as fid:\r\n fid.writelines(lines)\r\n fid.flush()\r\n\r\nreader.inputs.in_file = \"testcsv.csv\"\r\nout = reader.run()\r\n\r\nassert out.outputs.column_1 == [\"hello, world\"]\r\n```\r\n\r\n### Platform details:\r\n\r\n<!-- Please run the following code from your shell and place the output between the triple ticks, below.\r\npython -c \"import nipype; from pprint import pprint; pprint(nipype.get_info())\"\r\n-->\r\n\r\n```\r\n{'commit_hash': 'c46a957bd',\r\n 'commit_source': 'repository',\r\n 'networkx_version': '3.2.1',\r\n 'nibabel_version': '5.2.1',\r\n 'nipype_version': '1.8.7.dev0',\r\n 'numpy_version': '1.26.4',\r\n 'pkg_path': '/home/USER/Desktop/nipype/nipype',\r\n 'scipy_version': '1.12.0',\r\n 'sys_executable': '/home/USER/Desktop/nipype/.venv/bin/python',\r\n 'sys_platform': 'linux',\r\n 'sys_version': '3.9.18 (main, Feb 1 2024, 11:48:51) \\n[GCC 11.4.0]',\r\n 'traits_version': '6.3.2'}\r\n\r\n```\r\n\r\n### Execution environment\r\n- My python environment outside container\r\n\n", "before_files": [{"content": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"CSV Handling utilities\n\"\"\"\nfrom ..base import traits, TraitedSpec, DynamicTraitedSpec, File, BaseInterface\nfrom ..io import add_traits\n\n\nclass CSVReaderInputSpec(DynamicTraitedSpec, TraitedSpec):\n in_file = File(\n exists=True, mandatory=True, desc=\"Input comma-seperated value (CSV) file\"\n )\n header = traits.Bool(\n False, usedefault=True, desc=\"True if the first line is a column header\"\n )\n\n\nclass CSVReader(BaseInterface):\n \"\"\"\n Examples\n --------\n\n >>> reader = CSVReader() # doctest: +SKIP\n >>> reader.inputs.in_file = 'noHeader.csv' # doctest: +SKIP\n >>> out = reader.run() # doctest: +SKIP\n >>> out.outputs.column_0 == ['foo', 'bar', 'baz'] # doctest: +SKIP\n True\n >>> out.outputs.column_1 == ['hello', 'world', 'goodbye'] # doctest: +SKIP\n True\n >>> out.outputs.column_2 == ['300.1', '5', '0.3'] # doctest: +SKIP\n True\n\n >>> reader = CSVReader() # doctest: +SKIP\n >>> reader.inputs.in_file = 'header.csv' # doctest: +SKIP\n >>> reader.inputs.header = True # doctest: +SKIP\n >>> out = reader.run() # doctest: +SKIP\n >>> out.outputs.files == ['foo', 'bar', 'baz'] # doctest: +SKIP\n True\n >>> out.outputs.labels == ['hello', 'world', 'goodbye'] # doctest: +SKIP\n True\n >>> out.outputs.erosion == ['300.1', '5', '0.3'] # doctest: +SKIP\n True\n\n \"\"\"\n\n input_spec = CSVReaderInputSpec\n output_spec = DynamicTraitedSpec\n _always_run = True\n\n def _append_entry(self, outputs, entry):\n for key, value in zip(self._outfields, entry):\n outputs[key].append(value)\n return outputs\n\n def _parse_line(self, line):\n line = line.replace(\"\\n\", \"\")\n entry = [x.strip() for x in line.split(\",\")]\n return entry\n\n def _get_outfields(self):\n with open(self.inputs.in_file) as fid:\n entry = self._parse_line(fid.readline())\n if self.inputs.header:\n self._outfields = tuple(entry)\n else:\n self._outfields = tuple([\"column_\" + str(x) for x in range(len(entry))])\n return self._outfields\n\n def _run_interface(self, runtime):\n self._get_outfields()\n return runtime\n\n def _outputs(self):\n return self._add_output_traits(super()._outputs())\n\n def _add_output_traits(self, base):\n return add_traits(base, self._get_outfields())\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n isHeader = True\n for key in self._outfields:\n outputs[key] = [] # initialize outfields\n with open(self.inputs.in_file) as fid:\n for line in fid.readlines():\n if self.inputs.header and isHeader: # skip header line\n isHeader = False\n continue\n entry = self._parse_line(line)\n outputs = self._append_entry(outputs, entry)\n return outputs\n", "path": "nipype/interfaces/utility/csv.py"}], "after_files": [{"content": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"CSV Handling utilities\n\"\"\"\nimport csv\nfrom ..base import traits, TraitedSpec, DynamicTraitedSpec, File, BaseInterface\nfrom ..io import add_traits\n\n\nclass CSVReaderInputSpec(DynamicTraitedSpec, TraitedSpec):\n in_file = File(\n exists=True, mandatory=True, desc=\"Input comma-seperated value (CSV) file\"\n )\n header = traits.Bool(\n False, usedefault=True, desc=\"True if the first line is a column header\"\n )\n delimiter = traits.String(\",\", usedefault=True, desc=\"Delimiter to use.\")\n\n\nclass CSVReader(BaseInterface):\n \"\"\"\n Examples\n --------\n\n >>> reader = CSVReader() # doctest: +SKIP\n >>> reader.inputs.in_file = 'noHeader.csv' # doctest: +SKIP\n >>> out = reader.run() # doctest: +SKIP\n >>> out.outputs.column_0 == ['foo', 'bar', 'baz'] # doctest: +SKIP\n True\n >>> out.outputs.column_1 == ['hello', 'world', 'goodbye'] # doctest: +SKIP\n True\n >>> out.outputs.column_2 == ['300.1', '5', '0.3'] # doctest: +SKIP\n True\n\n >>> reader = CSVReader() # doctest: +SKIP\n >>> reader.inputs.in_file = 'header.csv' # doctest: +SKIP\n >>> reader.inputs.header = True # doctest: +SKIP\n >>> out = reader.run() # doctest: +SKIP\n >>> out.outputs.files == ['foo', 'bar', 'baz'] # doctest: +SKIP\n True\n >>> out.outputs.labels == ['hello', 'world', 'goodbye'] # doctest: +SKIP\n True\n >>> out.outputs.erosion == ['300.1', '5', '0.3'] # doctest: +SKIP\n True\n\n \"\"\"\n\n input_spec = CSVReaderInputSpec\n output_spec = DynamicTraitedSpec\n _always_run = True\n\n def _append_entry(self, outputs, entry):\n for key, value in zip(self._outfields, entry):\n outputs[key].append(value)\n return outputs\n\n def _get_outfields(self):\n with open(self.inputs.in_file) as fid:\n reader = csv.reader(fid, delimiter=self.inputs.delimiter)\n\n entry = next(reader)\n if self.inputs.header:\n self._outfields = tuple(entry)\n else:\n self._outfields = tuple([\"column_\" + str(x) for x in range(len(entry))])\n return self._outfields\n\n def _run_interface(self, runtime):\n self._get_outfields()\n return runtime\n\n def _outputs(self):\n return self._add_output_traits(super()._outputs())\n\n def _add_output_traits(self, base):\n return add_traits(base, self._get_outfields())\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n isHeader = True\n for key in self._outfields:\n outputs[key] = [] # initialize outfields\n with open(self.inputs.in_file) as fid:\n reader = csv.reader(fid, delimiter=self.inputs.delimiter)\n for entry in reader:\n if self.inputs.header and isHeader: # skip header line\n isHeader = False\n continue\n outputs = self._append_entry(outputs, entry)\n return outputs\n", "path": "nipype/interfaces/utility/csv.py"}]}
1,690
428
gh_patches_debug_22105
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-4902
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Protect against web/api 5xx responses When a build is attempted, but a web/api instance throws a 5xx response, a number of strange behaviors can result: - Builds will get stuck in triggered state - Builds will fail randomly when updating the api fails - 5xx responses will be returned from the web servers to users Part of the resolution to this may be defensive protection around intermittent 5xx responses. This may take some operation changes as well though, as our load balancer should really assume a 5xx response is enough to dislodge the server from the lb group. Raised from #2255 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `readthedocs/restapi/client.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 """Simple client to access our API with Slumber credentials.""" 4 5 from __future__ import ( 6 absolute_import, 7 division, 8 print_function, 9 unicode_literals, 10 ) 11 12 import logging 13 14 import requests 15 from django.conf import settings 16 from requests_toolbelt.adapters import host_header_ssl 17 from rest_framework.renderers import JSONRenderer 18 from slumber import API, serialize 19 20 log = logging.getLogger(__name__) 21 22 PRODUCTION_DOMAIN = getattr(settings, 'PRODUCTION_DOMAIN', 'readthedocs.org') 23 API_HOST = getattr(settings, 'SLUMBER_API_HOST', 'https://readthedocs.org') 24 USER = getattr(settings, 'SLUMBER_USERNAME', None) 25 PASS = getattr(settings, 'SLUMBER_PASSWORD', None) 26 27 28 class DrfJsonSerializer(serialize.JsonSerializer): 29 30 """Additional serialization help from the DRF renderer""" 31 32 key = 'json-drf' 33 34 def dumps(self, data): 35 """Used to be able to render datetime objects.""" 36 return JSONRenderer().render(data) 37 38 39 def setup_api(): 40 session = requests.Session() 41 if API_HOST.startswith('https'): 42 # Only use the HostHeaderSSLAdapter for HTTPS connections 43 adapter_class = host_header_ssl.HostHeaderSSLAdapter 44 else: 45 adapter_class = requests.adapters.HTTPAdapter 46 47 session.mount( 48 API_HOST, 49 adapter_class(max_retries=3), 50 ) 51 session.headers.update({'Host': PRODUCTION_DOMAIN}) 52 api_config = { 53 'base_url': '%s/api/v2/' % API_HOST, 54 'serializer': serialize.Serializer( 55 default='json-drf', 56 serializers=[ 57 serialize.JsonSerializer(), 58 DrfJsonSerializer(), 59 ], 60 ), 61 'session': session, 62 } 63 if USER and PASS: 64 log.debug( 65 'Using slumber v2 with user %s, pointed at %s', 66 USER, 67 API_HOST, 68 ) 69 session.auth = (USER, PASS) 70 else: 71 log.warning('SLUMBER_USERNAME/PASSWORD settings are not set') 72 return API(**api_config) 73 74 75 api = setup_api() 76 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/readthedocs/restapi/client.py b/readthedocs/restapi/client.py --- a/readthedocs/restapi/client.py +++ b/readthedocs/restapi/client.py @@ -13,6 +13,7 @@ import requests from django.conf import settings +from requests.packages.urllib3.util.retry import Retry # noqa from requests_toolbelt.adapters import host_header_ssl from rest_framework.renderers import JSONRenderer from slumber import API, serialize @@ -44,9 +45,21 @@ else: adapter_class = requests.adapters.HTTPAdapter + # Define a retry mechanism trying to attempt to not fail in the first + # error. Builders hit this issue frequently because the webs are high loaded + retry = Retry( + total=3, + read=3, + connect=3, + status=3, + backoff_factor=0.5, # 0.5, 1, 2 seconds + method_whitelist=('GET', 'PUT', 'PATCH', 'POST'), + status_forcelist=(408, 413, 429, 500, 502, 503, 504), + ) + session.mount( API_HOST, - adapter_class(max_retries=3), + adapter_class(max_retries=retry), ) session.headers.update({'Host': PRODUCTION_DOMAIN}) api_config = {
{"golden_diff": "diff --git a/readthedocs/restapi/client.py b/readthedocs/restapi/client.py\n--- a/readthedocs/restapi/client.py\n+++ b/readthedocs/restapi/client.py\n@@ -13,6 +13,7 @@\n \n import requests\n from django.conf import settings\n+from requests.packages.urllib3.util.retry import Retry # noqa\n from requests_toolbelt.adapters import host_header_ssl\n from rest_framework.renderers import JSONRenderer\n from slumber import API, serialize\n@@ -44,9 +45,21 @@\n else:\n adapter_class = requests.adapters.HTTPAdapter\n \n+ # Define a retry mechanism trying to attempt to not fail in the first\n+ # error. Builders hit this issue frequently because the webs are high loaded\n+ retry = Retry(\n+ total=3,\n+ read=3,\n+ connect=3,\n+ status=3,\n+ backoff_factor=0.5, # 0.5, 1, 2 seconds\n+ method_whitelist=('GET', 'PUT', 'PATCH', 'POST'),\n+ status_forcelist=(408, 413, 429, 500, 502, 503, 504),\n+ )\n+\n session.mount(\n API_HOST,\n- adapter_class(max_retries=3),\n+ adapter_class(max_retries=retry),\n )\n session.headers.update({'Host': PRODUCTION_DOMAIN})\n api_config = {\n", "issue": "Protect against web/api 5xx responses\nWhen a build is attempted, but a web/api instance throws a 5xx response, a number of strange behaviors can result:\n- Builds will get stuck in triggered state\n- Builds will fail randomly when updating the api fails\n- 5xx responses will be returned from the web servers to users\n\nPart of the resolution to this may be defensive protection around intermittent 5xx responses. This may take some operation changes as well though, as our load balancer should really assume a 5xx response is enough to dislodge the server from the lb group.\n\nRaised from #2255 \n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Simple client to access our API with Slumber credentials.\"\"\"\n\nfrom __future__ import (\n absolute_import,\n division,\n print_function,\n unicode_literals,\n)\n\nimport logging\n\nimport requests\nfrom django.conf import settings\nfrom requests_toolbelt.adapters import host_header_ssl\nfrom rest_framework.renderers import JSONRenderer\nfrom slumber import API, serialize\n\nlog = logging.getLogger(__name__)\n\nPRODUCTION_DOMAIN = getattr(settings, 'PRODUCTION_DOMAIN', 'readthedocs.org')\nAPI_HOST = getattr(settings, 'SLUMBER_API_HOST', 'https://readthedocs.org')\nUSER = getattr(settings, 'SLUMBER_USERNAME', None)\nPASS = getattr(settings, 'SLUMBER_PASSWORD', None)\n\n\nclass DrfJsonSerializer(serialize.JsonSerializer):\n\n \"\"\"Additional serialization help from the DRF renderer\"\"\"\n\n key = 'json-drf'\n\n def dumps(self, data):\n \"\"\"Used to be able to render datetime objects.\"\"\"\n return JSONRenderer().render(data)\n\n\ndef setup_api():\n session = requests.Session()\n if API_HOST.startswith('https'):\n # Only use the HostHeaderSSLAdapter for HTTPS connections\n adapter_class = host_header_ssl.HostHeaderSSLAdapter\n else:\n adapter_class = requests.adapters.HTTPAdapter\n\n session.mount(\n API_HOST,\n adapter_class(max_retries=3),\n )\n session.headers.update({'Host': PRODUCTION_DOMAIN})\n api_config = {\n 'base_url': '%s/api/v2/' % API_HOST,\n 'serializer': serialize.Serializer(\n default='json-drf',\n serializers=[\n serialize.JsonSerializer(),\n DrfJsonSerializer(),\n ],\n ),\n 'session': session,\n }\n if USER and PASS:\n log.debug(\n 'Using slumber v2 with user %s, pointed at %s',\n USER,\n API_HOST,\n )\n session.auth = (USER, PASS)\n else:\n log.warning('SLUMBER_USERNAME/PASSWORD settings are not set')\n return API(**api_config)\n\n\napi = setup_api()\n", "path": "readthedocs/restapi/client.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Simple client to access our API with Slumber credentials.\"\"\"\n\nfrom __future__ import (\n absolute_import,\n division,\n print_function,\n unicode_literals,\n)\n\nimport logging\n\nimport requests\nfrom django.conf import settings\nfrom requests.packages.urllib3.util.retry import Retry # noqa\nfrom requests_toolbelt.adapters import host_header_ssl\nfrom rest_framework.renderers import JSONRenderer\nfrom slumber import API, serialize\n\nlog = logging.getLogger(__name__)\n\nPRODUCTION_DOMAIN = getattr(settings, 'PRODUCTION_DOMAIN', 'readthedocs.org')\nAPI_HOST = getattr(settings, 'SLUMBER_API_HOST', 'https://readthedocs.org')\nUSER = getattr(settings, 'SLUMBER_USERNAME', None)\nPASS = getattr(settings, 'SLUMBER_PASSWORD', None)\n\n\nclass DrfJsonSerializer(serialize.JsonSerializer):\n\n \"\"\"Additional serialization help from the DRF renderer\"\"\"\n\n key = 'json-drf'\n\n def dumps(self, data):\n \"\"\"Used to be able to render datetime objects.\"\"\"\n return JSONRenderer().render(data)\n\n\ndef setup_api():\n session = requests.Session()\n if API_HOST.startswith('https'):\n # Only use the HostHeaderSSLAdapter for HTTPS connections\n adapter_class = host_header_ssl.HostHeaderSSLAdapter\n else:\n adapter_class = requests.adapters.HTTPAdapter\n\n # Define a retry mechanism trying to attempt to not fail in the first\n # error. Builders hit this issue frequently because the webs are high loaded\n retry = Retry(\n total=3,\n read=3,\n connect=3,\n status=3,\n backoff_factor=0.5, # 0.5, 1, 2 seconds\n method_whitelist=('GET', 'PUT', 'PATCH', 'POST'),\n status_forcelist=(408, 413, 429, 500, 502, 503, 504),\n )\n\n session.mount(\n API_HOST,\n adapter_class(max_retries=retry),\n )\n session.headers.update({'Host': PRODUCTION_DOMAIN})\n api_config = {\n 'base_url': '%s/api/v2/' % API_HOST,\n 'serializer': serialize.Serializer(\n default='json-drf',\n serializers=[\n serialize.JsonSerializer(),\n DrfJsonSerializer(),\n ],\n ),\n 'session': session,\n }\n if USER and PASS:\n log.debug(\n 'Using slumber v2 with user %s, pointed at %s',\n USER,\n API_HOST,\n )\n session.auth = (USER, PASS)\n else:\n log.warning('SLUMBER_USERNAME/PASSWORD settings are not set')\n return API(**api_config)\n\n\napi = setup_api()\n", "path": "readthedocs/restapi/client.py"}]}
979
326
gh_patches_debug_5993
rasdani/github-patches
git_diff
common-workflow-language__cwltool-1504
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Error when building subgraph with --target Hello, I am trying to use `--target` option with this very simple workflow ``` cwltool --target step2 --debug --print-subgraph wf.cwl > subgraph.cwl ``` However I get an error `TypeError: unhashable type: 'CommentedMap'` Thanks! --- ## Expected Behavior **subgraph.cwl** ``` { "class": "Workflow", "cwlVersion": "v1.2", "id": "file:///home/wf.cwl", "inputs": [ { "id": "file:///home/wf.cwl#step2_in", "type": "string" } ], "outputs": [ { "id": "file:///home/wf.cwl#step2_out", "outputSource": "file:///home/wf.cwl#step2/output", "type": "File" } ], "steps": [ { "id": "file:///home/wf.cwl#step2", "in": [ { "id": "file:///home/wf.cwl#step2/in", "source": "file:///home/wf.cwl#step2_in" } ], "out": [ { "id": "file:///home/wf.cwl#step2/output" } ], "run": "file:///home/echo.cwl" } ] } ``` ## Actual Behavior Fails with the error as seen in [Full Traceback](#traceback) ## Workflow Code **wf.cwl** ``` class: Workflow cwlVersion: v1.2 inputs: - id: step1_in type: string - id: step2_in type: string outputs: - id: step1_out type: File outputSource: step1/output - id: step2_out type: File outputSource: step2/output steps: - id: step1 in: - id: in source: step1_in out: - id: output run: echo.cwl - id: step2 in: - id: in source: step2_in out: - id: output run: echo.cwl ``` **echo.cwl** ``` cwlVersion: v1.2 class: CommandLineTool baseCommand: echo inputs: - id: in type: string inputBinding: {position: 1} outputs: - id: output type: stdout stdout: output_echo.txt ``` **wf.yml** ``` step1_in: step1 step2_in: step2 ``` ## Full Traceback <a name="traceback"/> ``` Traceback (most recent call last): File "/home/miniconda/envs/cwl_env/lib/python3.8/site-packages/cwltool/main.py", line 1020, in main ctool = choose_target(args, tool, loadingContext) File "/home/miniconda/envs/cwl_env/lib/python3.8/site-packages/cwltool/main.py", line 768, in choose_target extracted = get_subgraph( File "/home/miniconda/envs/cwl_env/lib/python3.8/site-packages/cwltool/subgraph.py", line 83, in get_subgraph declare_node(nodes, out, None) File "/home/miniconda/envs/cwl_env/lib/python3.8/site-packages/cwltool/subgraph.py", line 41, in declare_node if nodeid in nodes: TypeError: unhashable type: 'CommentedMap' ``` When I am printing out the `nodeid` that is passed to `declare_node` function I see `CommentedMap` instead of `string`: ``` ordereddict([('id', 'file:///home/wf.cwl#step1/output')]) ``` After I changed this lines 96-97 in [subgraph.py](https://github.com/common-workflow-language/cwltool/blob/main/cwltool/subgraph.py) ``` #declare_node(nodes, out, None) declare_node(nodes, out["id"], None) #nodes[out].up.append(st["id"]) nodes[out["id"]].up.append(st["id"]) ``` I got the `subgraph.cwl` as shown above ## Your Environment * cwltool version: 3.0.20201203173111 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cwltool/subgraph.py` Content: ``` 1 import urllib 2 from collections import namedtuple 3 from typing import ( 4 Any, 5 Dict, 6 List, 7 Mapping, 8 MutableMapping, 9 MutableSequence, 10 Optional, 11 Set, 12 Tuple, 13 cast, 14 ) 15 16 from ruamel.yaml.comments import CommentedMap 17 18 from .utils import CWLObjectType, aslist 19 from .workflow import Workflow, WorkflowStep 20 21 Node = namedtuple("Node", ("up", "down", "type")) 22 UP = "up" 23 DOWN = "down" 24 INPUT = "input" 25 OUTPUT = "output" 26 STEP = "step" 27 28 29 def subgraph_visit( 30 current: str, 31 nodes: MutableMapping[str, Node], 32 visited: Set[str], 33 direction: str, 34 ) -> None: 35 36 if current in visited: 37 return 38 visited.add(current) 39 40 if direction == DOWN: 41 d = nodes[current].down 42 if direction == UP: 43 d = nodes[current].up 44 for c in d: 45 subgraph_visit(c, nodes, visited, direction) 46 47 48 def declare_node(nodes: Dict[str, Node], nodeid: str, tp: Optional[str]) -> Node: 49 if nodeid in nodes: 50 n = nodes[nodeid] 51 if n.type is None: 52 nodes[nodeid] = Node(n.up, n.down, tp) 53 else: 54 nodes[nodeid] = Node([], [], tp) 55 return nodes[nodeid] 56 57 58 def find_step(steps: List[WorkflowStep], stepid: str) -> Optional[CWLObjectType]: 59 for st in steps: 60 if st.tool["id"] == stepid: 61 return st.tool 62 return None 63 64 65 def get_subgraph(roots: MutableSequence[str], tool: Workflow) -> CommentedMap: 66 if tool.tool["class"] != "Workflow": 67 raise Exception("Can only extract subgraph from workflow") 68 69 nodes: Dict[str, Node] = {} 70 71 for inp in tool.tool["inputs"]: 72 declare_node(nodes, inp["id"], INPUT) 73 74 for out in tool.tool["outputs"]: 75 declare_node(nodes, out["id"], OUTPUT) 76 for i in aslist(out.get("outputSource", [])): 77 # source is upstream from output (dependency) 78 nodes[out["id"]].up.append(i) 79 # output is downstream from source 80 declare_node(nodes, i, None) 81 nodes[i].down.append(out["id"]) 82 83 for st in tool.tool["steps"]: 84 step = declare_node(nodes, st["id"], STEP) 85 for i in st["in"]: 86 if "source" not in i: 87 continue 88 for src in aslist(i["source"]): 89 # source is upstream from step (dependency) 90 step.up.append(src) 91 # step is downstream from source 92 declare_node(nodes, src, None) 93 nodes[src].down.append(st["id"]) 94 for out in st["out"]: 95 # output is downstream from step 96 step.down.append(out) 97 # step is upstream from output 98 declare_node(nodes, out, None) 99 nodes[out].up.append(st["id"]) 100 101 # Find all the downstream nodes from the starting points 102 visited_down: Set[str] = set() 103 for r in roots: 104 if nodes[r].type == OUTPUT: 105 subgraph_visit(r, nodes, visited_down, UP) 106 else: 107 subgraph_visit(r, nodes, visited_down, DOWN) 108 109 # Now make sure all the nodes are connected to upstream inputs 110 visited: Set[str] = set() 111 rewire: Dict[str, Tuple[str, CWLObjectType]] = {} 112 for v in visited_down: 113 visited.add(v) 114 if nodes[v].type in (STEP, OUTPUT): 115 for u in nodes[v].up: 116 if u in visited_down: 117 continue 118 if nodes[u].type == INPUT: 119 visited.add(u) 120 else: 121 # rewire 122 df = urllib.parse.urldefrag(u) 123 rn = str(df[0] + "#" + df[1].replace("/", "_")) 124 if nodes[v].type == STEP: 125 wfstep = find_step(tool.steps, v) 126 if wfstep is not None: 127 for inp in cast( 128 MutableSequence[CWLObjectType], wfstep["inputs"] 129 ): 130 if "source" in inp and u in cast( 131 CWLObjectType, inp["source"] 132 ): 133 rewire[u] = (rn, cast(CWLObjectType, inp["type"])) 134 break 135 else: 136 raise Exception("Could not find step %s" % v) 137 138 extracted = CommentedMap() 139 for f in tool.tool: 140 if f in ("steps", "inputs", "outputs"): 141 extracted[f] = [] 142 for i in tool.tool[f]: 143 if i["id"] in visited: 144 if f == "steps": 145 for inport in i["in"]: 146 if "source" not in inport: 147 continue 148 if isinstance(inport["source"], MutableSequence): 149 inport["source"] = [ 150 rewire[s][0] 151 for s in inport["source"] 152 if s in rewire 153 ] 154 elif inport["source"] in rewire: 155 inport["source"] = rewire[inport["source"]][0] 156 extracted[f].append(i) 157 else: 158 extracted[f] = tool.tool[f] 159 160 for rv in rewire.values(): 161 extracted["inputs"].append({"id": rv[0], "type": rv[1]}) 162 163 return extracted 164 165 166 def get_step(tool: Workflow, step_id: str) -> CommentedMap: 167 168 extracted = CommentedMap() 169 170 step = find_step(tool.steps, step_id) 171 if step is None: 172 raise Exception(f"Step {step_id} was not found") 173 174 extracted["steps"] = [step] 175 extracted["inputs"] = [] 176 extracted["outputs"] = [] 177 178 for inport in cast(List[CWLObjectType], step["in"]): 179 name = cast(str, inport["id"]).split("#")[-1].split("/")[-1] 180 extracted["inputs"].append({"id": name, "type": "Any"}) 181 inport["source"] = name 182 if "linkMerge" in inport: 183 del inport["linkMerge"] 184 185 for outport in cast(List[str], step["out"]): 186 name = outport.split("#")[-1].split("/")[-1] 187 extracted["outputs"].append( 188 {"id": name, "type": "Any", "outputSource": f"{step_id}/{name}"} 189 ) 190 191 for f in tool.tool: 192 if f not in ("steps", "inputs", "outputs"): 193 extracted[f] = tool.tool[f] 194 195 return extracted 196 197 198 def get_process(tool: Workflow, step_id: str, index: Mapping[str, Any]) -> Any: 199 """Return just a single Process from a Workflow step.""" 200 step = find_step(tool.steps, step_id) 201 if step is None: 202 raise Exception(f"Step {step_id} was not found") 203 204 run = step["run"] 205 206 if isinstance(run, str): 207 return index[run] 208 else: 209 return run 210 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cwltool/subgraph.py b/cwltool/subgraph.py --- a/cwltool/subgraph.py +++ b/cwltool/subgraph.py @@ -92,6 +92,8 @@ declare_node(nodes, src, None) nodes[src].down.append(st["id"]) for out in st["out"]: + if isinstance(out, Mapping) and "id" in out: + out = out["id"] # output is downstream from step step.down.append(out) # step is upstream from output
{"golden_diff": "diff --git a/cwltool/subgraph.py b/cwltool/subgraph.py\n--- a/cwltool/subgraph.py\n+++ b/cwltool/subgraph.py\n@@ -92,6 +92,8 @@\n declare_node(nodes, src, None)\n nodes[src].down.append(st[\"id\"])\n for out in st[\"out\"]:\n+ if isinstance(out, Mapping) and \"id\" in out:\n+ out = out[\"id\"]\n # output is downstream from step\n step.down.append(out)\n # step is upstream from output\n", "issue": "Error when building subgraph with --target\nHello,\r\n\r\nI am trying to use `--target` option with this very simple workflow\r\n\r\n```\r\ncwltool --target step2 --debug --print-subgraph wf.cwl > subgraph.cwl\r\n```\r\n\r\nHowever I get an error `TypeError: unhashable type: 'CommentedMap'`\r\n\r\nThanks!\r\n---\r\n\r\n## Expected Behavior\r\n**subgraph.cwl**\r\n```\r\n{\r\n \"class\": \"Workflow\",\r\n \"cwlVersion\": \"v1.2\",\r\n \"id\": \"file:///home/wf.cwl\",\r\n \"inputs\": [\r\n {\r\n \"id\": \"file:///home/wf.cwl#step2_in\",\r\n \"type\": \"string\"\r\n }\r\n ],\r\n \"outputs\": [\r\n {\r\n \"id\": \"file:///home/wf.cwl#step2_out\",\r\n \"outputSource\": \"file:///home/wf.cwl#step2/output\",\r\n \"type\": \"File\"\r\n }\r\n ],\r\n \"steps\": [\r\n {\r\n \"id\": \"file:///home/wf.cwl#step2\",\r\n \"in\": [\r\n {\r\n \"id\": \"file:///home/wf.cwl#step2/in\",\r\n \"source\": \"file:///home/wf.cwl#step2_in\"\r\n }\r\n ],\r\n \"out\": [\r\n {\r\n \"id\": \"file:///home/wf.cwl#step2/output\"\r\n }\r\n ],\r\n \"run\": \"file:///home/echo.cwl\"\r\n }\r\n ]\r\n}\r\n```\r\n\r\n## Actual Behavior\r\nFails with the error as seen in [Full Traceback](#traceback)\r\n\r\n## Workflow Code\r\n**wf.cwl**\r\n```\r\nclass: Workflow\r\ncwlVersion: v1.2\r\ninputs:\r\n - id: step1_in\r\n type: string\r\n - id: step2_in\r\n type: string\r\noutputs:\r\n - id: step1_out\r\n type: File\r\n outputSource: step1/output\r\n - id: step2_out\r\n type: File\r\n outputSource: step2/output\r\nsteps:\r\n - id: step1\r\n in:\r\n - id: in\r\n source: step1_in\r\n out:\r\n - id: output\r\n run: echo.cwl\r\n - id: step2\r\n in:\r\n - id: in\r\n source: step2_in\r\n out:\r\n - id: output\r\n run: echo.cwl\r\n```\r\n**echo.cwl**\r\n```\r\ncwlVersion: v1.2\r\nclass: CommandLineTool\r\nbaseCommand: echo\r\ninputs:\r\n - id: in\r\n type: string\r\n inputBinding: {position: 1}\r\noutputs:\r\n - id: output\r\n type: stdout\r\nstdout: output_echo.txt\r\n```\r\n**wf.yml**\r\n```\r\nstep1_in: step1\r\nstep2_in: step2\r\n```\r\n\r\n## Full Traceback\r\n\r\n<a name=\"traceback\"/>\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/miniconda/envs/cwl_env/lib/python3.8/site-packages/cwltool/main.py\", line 1020, in main\r\n ctool = choose_target(args, tool, loadingContext)\r\n File \"/home/miniconda/envs/cwl_env/lib/python3.8/site-packages/cwltool/main.py\", line 768, in choose_target\r\n extracted = get_subgraph(\r\n File \"/home/miniconda/envs/cwl_env/lib/python3.8/site-packages/cwltool/subgraph.py\", line 83, in get_subgraph\r\n declare_node(nodes, out, None)\r\n File \"/home/miniconda/envs/cwl_env/lib/python3.8/site-packages/cwltool/subgraph.py\", line 41, in declare_node\r\n if nodeid in nodes:\r\nTypeError: unhashable type: 'CommentedMap' \r\n```\r\nWhen I am printing out the `nodeid` that is passed to `declare_node` function I see `CommentedMap` instead of `string`:\r\n```\r\nordereddict([('id', 'file:///home/wf.cwl#step1/output')])\r\n```\r\nAfter I changed this lines 96-97 in [subgraph.py](https://github.com/common-workflow-language/cwltool/blob/main/cwltool/subgraph.py)\r\n```\r\n#declare_node(nodes, out, None)\r\ndeclare_node(nodes, out[\"id\"], None)\r\n#nodes[out].up.append(st[\"id\"])\r\nnodes[out[\"id\"]].up.append(st[\"id\"])\r\n```\r\nI got the `subgraph.cwl` as shown above\r\n\r\n## Your Environment\r\n* cwltool version: 3.0.20201203173111\r\n\r\n\n", "before_files": [{"content": "import urllib\nfrom collections import namedtuple\nfrom typing import (\n Any,\n Dict,\n List,\n Mapping,\n MutableMapping,\n MutableSequence,\n Optional,\n Set,\n Tuple,\n cast,\n)\n\nfrom ruamel.yaml.comments import CommentedMap\n\nfrom .utils import CWLObjectType, aslist\nfrom .workflow import Workflow, WorkflowStep\n\nNode = namedtuple(\"Node\", (\"up\", \"down\", \"type\"))\nUP = \"up\"\nDOWN = \"down\"\nINPUT = \"input\"\nOUTPUT = \"output\"\nSTEP = \"step\"\n\n\ndef subgraph_visit(\n current: str,\n nodes: MutableMapping[str, Node],\n visited: Set[str],\n direction: str,\n) -> None:\n\n if current in visited:\n return\n visited.add(current)\n\n if direction == DOWN:\n d = nodes[current].down\n if direction == UP:\n d = nodes[current].up\n for c in d:\n subgraph_visit(c, nodes, visited, direction)\n\n\ndef declare_node(nodes: Dict[str, Node], nodeid: str, tp: Optional[str]) -> Node:\n if nodeid in nodes:\n n = nodes[nodeid]\n if n.type is None:\n nodes[nodeid] = Node(n.up, n.down, tp)\n else:\n nodes[nodeid] = Node([], [], tp)\n return nodes[nodeid]\n\n\ndef find_step(steps: List[WorkflowStep], stepid: str) -> Optional[CWLObjectType]:\n for st in steps:\n if st.tool[\"id\"] == stepid:\n return st.tool\n return None\n\n\ndef get_subgraph(roots: MutableSequence[str], tool: Workflow) -> CommentedMap:\n if tool.tool[\"class\"] != \"Workflow\":\n raise Exception(\"Can only extract subgraph from workflow\")\n\n nodes: Dict[str, Node] = {}\n\n for inp in tool.tool[\"inputs\"]:\n declare_node(nodes, inp[\"id\"], INPUT)\n\n for out in tool.tool[\"outputs\"]:\n declare_node(nodes, out[\"id\"], OUTPUT)\n for i in aslist(out.get(\"outputSource\", [])):\n # source is upstream from output (dependency)\n nodes[out[\"id\"]].up.append(i)\n # output is downstream from source\n declare_node(nodes, i, None)\n nodes[i].down.append(out[\"id\"])\n\n for st in tool.tool[\"steps\"]:\n step = declare_node(nodes, st[\"id\"], STEP)\n for i in st[\"in\"]:\n if \"source\" not in i:\n continue\n for src in aslist(i[\"source\"]):\n # source is upstream from step (dependency)\n step.up.append(src)\n # step is downstream from source\n declare_node(nodes, src, None)\n nodes[src].down.append(st[\"id\"])\n for out in st[\"out\"]:\n # output is downstream from step\n step.down.append(out)\n # step is upstream from output\n declare_node(nodes, out, None)\n nodes[out].up.append(st[\"id\"])\n\n # Find all the downstream nodes from the starting points\n visited_down: Set[str] = set()\n for r in roots:\n if nodes[r].type == OUTPUT:\n subgraph_visit(r, nodes, visited_down, UP)\n else:\n subgraph_visit(r, nodes, visited_down, DOWN)\n\n # Now make sure all the nodes are connected to upstream inputs\n visited: Set[str] = set()\n rewire: Dict[str, Tuple[str, CWLObjectType]] = {}\n for v in visited_down:\n visited.add(v)\n if nodes[v].type in (STEP, OUTPUT):\n for u in nodes[v].up:\n if u in visited_down:\n continue\n if nodes[u].type == INPUT:\n visited.add(u)\n else:\n # rewire\n df = urllib.parse.urldefrag(u)\n rn = str(df[0] + \"#\" + df[1].replace(\"/\", \"_\"))\n if nodes[v].type == STEP:\n wfstep = find_step(tool.steps, v)\n if wfstep is not None:\n for inp in cast(\n MutableSequence[CWLObjectType], wfstep[\"inputs\"]\n ):\n if \"source\" in inp and u in cast(\n CWLObjectType, inp[\"source\"]\n ):\n rewire[u] = (rn, cast(CWLObjectType, inp[\"type\"]))\n break\n else:\n raise Exception(\"Could not find step %s\" % v)\n\n extracted = CommentedMap()\n for f in tool.tool:\n if f in (\"steps\", \"inputs\", \"outputs\"):\n extracted[f] = []\n for i in tool.tool[f]:\n if i[\"id\"] in visited:\n if f == \"steps\":\n for inport in i[\"in\"]:\n if \"source\" not in inport:\n continue\n if isinstance(inport[\"source\"], MutableSequence):\n inport[\"source\"] = [\n rewire[s][0]\n for s in inport[\"source\"]\n if s in rewire\n ]\n elif inport[\"source\"] in rewire:\n inport[\"source\"] = rewire[inport[\"source\"]][0]\n extracted[f].append(i)\n else:\n extracted[f] = tool.tool[f]\n\n for rv in rewire.values():\n extracted[\"inputs\"].append({\"id\": rv[0], \"type\": rv[1]})\n\n return extracted\n\n\ndef get_step(tool: Workflow, step_id: str) -> CommentedMap:\n\n extracted = CommentedMap()\n\n step = find_step(tool.steps, step_id)\n if step is None:\n raise Exception(f\"Step {step_id} was not found\")\n\n extracted[\"steps\"] = [step]\n extracted[\"inputs\"] = []\n extracted[\"outputs\"] = []\n\n for inport in cast(List[CWLObjectType], step[\"in\"]):\n name = cast(str, inport[\"id\"]).split(\"#\")[-1].split(\"/\")[-1]\n extracted[\"inputs\"].append({\"id\": name, \"type\": \"Any\"})\n inport[\"source\"] = name\n if \"linkMerge\" in inport:\n del inport[\"linkMerge\"]\n\n for outport in cast(List[str], step[\"out\"]):\n name = outport.split(\"#\")[-1].split(\"/\")[-1]\n extracted[\"outputs\"].append(\n {\"id\": name, \"type\": \"Any\", \"outputSource\": f\"{step_id}/{name}\"}\n )\n\n for f in tool.tool:\n if f not in (\"steps\", \"inputs\", \"outputs\"):\n extracted[f] = tool.tool[f]\n\n return extracted\n\n\ndef get_process(tool: Workflow, step_id: str, index: Mapping[str, Any]) -> Any:\n \"\"\"Return just a single Process from a Workflow step.\"\"\"\n step = find_step(tool.steps, step_id)\n if step is None:\n raise Exception(f\"Step {step_id} was not found\")\n\n run = step[\"run\"]\n\n if isinstance(run, str):\n return index[run]\n else:\n return run\n", "path": "cwltool/subgraph.py"}], "after_files": [{"content": "import urllib\nfrom collections import namedtuple\nfrom typing import (\n Any,\n Dict,\n List,\n Mapping,\n MutableMapping,\n MutableSequence,\n Optional,\n Set,\n Tuple,\n cast,\n)\n\nfrom ruamel.yaml.comments import CommentedMap\n\nfrom .utils import CWLObjectType, aslist\nfrom .workflow import Workflow, WorkflowStep\n\nNode = namedtuple(\"Node\", (\"up\", \"down\", \"type\"))\nUP = \"up\"\nDOWN = \"down\"\nINPUT = \"input\"\nOUTPUT = \"output\"\nSTEP = \"step\"\n\n\ndef subgraph_visit(\n current: str,\n nodes: MutableMapping[str, Node],\n visited: Set[str],\n direction: str,\n) -> None:\n\n if current in visited:\n return\n visited.add(current)\n\n if direction == DOWN:\n d = nodes[current].down\n if direction == UP:\n d = nodes[current].up\n for c in d:\n subgraph_visit(c, nodes, visited, direction)\n\n\ndef declare_node(nodes: Dict[str, Node], nodeid: str, tp: Optional[str]) -> Node:\n if nodeid in nodes:\n n = nodes[nodeid]\n if n.type is None:\n nodes[nodeid] = Node(n.up, n.down, tp)\n else:\n nodes[nodeid] = Node([], [], tp)\n return nodes[nodeid]\n\n\ndef find_step(steps: List[WorkflowStep], stepid: str) -> Optional[CWLObjectType]:\n for st in steps:\n if st.tool[\"id\"] == stepid:\n return st.tool\n return None\n\n\ndef get_subgraph(roots: MutableSequence[str], tool: Workflow) -> CommentedMap:\n if tool.tool[\"class\"] != \"Workflow\":\n raise Exception(\"Can only extract subgraph from workflow\")\n\n nodes: Dict[str, Node] = {}\n\n for inp in tool.tool[\"inputs\"]:\n declare_node(nodes, inp[\"id\"], INPUT)\n\n for out in tool.tool[\"outputs\"]:\n declare_node(nodes, out[\"id\"], OUTPUT)\n for i in aslist(out.get(\"outputSource\", [])):\n # source is upstream from output (dependency)\n nodes[out[\"id\"]].up.append(i)\n # output is downstream from source\n declare_node(nodes, i, None)\n nodes[i].down.append(out[\"id\"])\n\n for st in tool.tool[\"steps\"]:\n step = declare_node(nodes, st[\"id\"], STEP)\n for i in st[\"in\"]:\n if \"source\" not in i:\n continue\n for src in aslist(i[\"source\"]):\n # source is upstream from step (dependency)\n step.up.append(src)\n # step is downstream from source\n declare_node(nodes, src, None)\n nodes[src].down.append(st[\"id\"])\n for out in st[\"out\"]:\n if isinstance(out, Mapping) and \"id\" in out:\n out = out[\"id\"]\n # output is downstream from step\n step.down.append(out)\n # step is upstream from output\n declare_node(nodes, out, None)\n nodes[out].up.append(st[\"id\"])\n\n # Find all the downstream nodes from the starting points\n visited_down: Set[str] = set()\n for r in roots:\n if nodes[r].type == OUTPUT:\n subgraph_visit(r, nodes, visited_down, UP)\n else:\n subgraph_visit(r, nodes, visited_down, DOWN)\n\n # Now make sure all the nodes are connected to upstream inputs\n visited: Set[str] = set()\n rewire: Dict[str, Tuple[str, CWLObjectType]] = {}\n for v in visited_down:\n visited.add(v)\n if nodes[v].type in (STEP, OUTPUT):\n for u in nodes[v].up:\n if u in visited_down:\n continue\n if nodes[u].type == INPUT:\n visited.add(u)\n else:\n # rewire\n df = urllib.parse.urldefrag(u)\n rn = str(df[0] + \"#\" + df[1].replace(\"/\", \"_\"))\n if nodes[v].type == STEP:\n wfstep = find_step(tool.steps, v)\n if wfstep is not None:\n for inp in cast(\n MutableSequence[CWLObjectType], wfstep[\"inputs\"]\n ):\n if \"source\" in inp and u in cast(\n CWLObjectType, inp[\"source\"]\n ):\n rewire[u] = (rn, cast(CWLObjectType, inp[\"type\"]))\n break\n else:\n raise Exception(\"Could not find step %s\" % v)\n\n extracted = CommentedMap()\n for f in tool.tool:\n if f in (\"steps\", \"inputs\", \"outputs\"):\n extracted[f] = []\n for i in tool.tool[f]:\n if i[\"id\"] in visited:\n if f == \"steps\":\n for inport in i[\"in\"]:\n if \"source\" not in inport:\n continue\n if isinstance(inport[\"source\"], MutableSequence):\n inport[\"source\"] = [\n rewire[s][0]\n for s in inport[\"source\"]\n if s in rewire\n ]\n elif inport[\"source\"] in rewire:\n inport[\"source\"] = rewire[inport[\"source\"]][0]\n extracted[f].append(i)\n else:\n extracted[f] = tool.tool[f]\n\n for rv in rewire.values():\n extracted[\"inputs\"].append({\"id\": rv[0], \"type\": rv[1]})\n\n return extracted\n\n\ndef get_step(tool: Workflow, step_id: str) -> CommentedMap:\n\n extracted = CommentedMap()\n\n step = find_step(tool.steps, step_id)\n if step is None:\n raise Exception(f\"Step {step_id} was not found\")\n\n extracted[\"steps\"] = [step]\n extracted[\"inputs\"] = []\n extracted[\"outputs\"] = []\n\n for inport in cast(List[CWLObjectType], step[\"in\"]):\n name = cast(str, inport[\"id\"]).split(\"#\")[-1].split(\"/\")[-1]\n extracted[\"inputs\"].append({\"id\": name, \"type\": \"Any\"})\n inport[\"source\"] = name\n if \"linkMerge\" in inport:\n del inport[\"linkMerge\"]\n\n for outport in cast(List[str], step[\"out\"]):\n name = outport.split(\"#\")[-1].split(\"/\")[-1]\n extracted[\"outputs\"].append(\n {\"id\": name, \"type\": \"Any\", \"outputSource\": f\"{step_id}/{name}\"}\n )\n\n for f in tool.tool:\n if f not in (\"steps\", \"inputs\", \"outputs\"):\n extracted[f] = tool.tool[f]\n\n return extracted\n\n\ndef get_process(tool: Workflow, step_id: str, index: Mapping[str, Any]) -> Any:\n \"\"\"Return just a single Process from a Workflow step.\"\"\"\n step = find_step(tool.steps, step_id)\n if step is None:\n raise Exception(f\"Step {step_id} was not found\")\n\n run = step[\"run\"]\n\n if isinstance(run, str):\n return index[run]\n else:\n return run\n", "path": "cwltool/subgraph.py"}]}
3,326
120
gh_patches_debug_6546
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-2645
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Spider texas_roadhouse is broken During the global build at 2021-08-11-14-42-19, spider **texas_roadhouse** failed with **0 features** and **0 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/logs/texas_roadhouse.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/output/texas_roadhouse.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/output/texas_roadhouse.geojson)) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `locations/spiders/texas_roadhouse.py` Content: ``` 1 import json 2 import scrapy 3 from locations.items import GeojsonPointItem 4 from locations.hours import OpeningHours 5 6 7 class TexasRoadhouseSpider(scrapy.Spider): 8 name = "texas_roadhouse" 9 item_attributes = { 'brand': "Texas Roadhouse", 'brand_wikidata': "Q7707945" } 10 allowed_domains = ["www.texasroadhouse.com"] 11 start_urls = ( 12 'https://www.texasroadhouse.com/sitemap.xml', 13 ) 14 15 def parse_hours(self, store_hours): 16 opening_hours = OpeningHours() 17 18 for weekday in store_hours: 19 # convert day from full Monday to Mo, etc 20 day = weekday.get('day')[:2] 21 open_time = weekday.get('hours').get('openTime') 22 close_time = weekday.get('hours').get('closeTime') 23 opening_hours.add_range(day=day, 24 open_time=open_time, 25 close_time=close_time, 26 time_format='%I:%M%p') 27 28 return opening_hours.as_opening_hours() 29 30 def parse(self, response): 31 response.selector.remove_namespaces() 32 city_urls = response.xpath('//url/loc/text()').extract() 33 for path in city_urls: 34 if path.startswith('https://www.texasroadhouse.com/locations/'): 35 yield scrapy.Request( 36 path.strip(), 37 callback=self.parse_store, 38 ) 39 40 def parse_store(self, response): 41 data = json.loads(response.xpath('//script/text()').extract_first()[22:-1]) 42 43 properties = { 44 'lat': data['latitude'], 45 'lon': data['longitude'], 46 'ref': data['url'], 47 'name': data['name'], 48 'addr_full': data['address1'], 49 'city': data['city'], 50 'state': data['state'], 51 'postcode': data['postalCode'], 52 'country': data['countryCode'], 53 'phone': data['telephone'], 54 'website': response.urljoin(data['url']), 55 'opening_hours': self.parse_hours(data['schedule']), 56 } 57 58 yield GeojsonPointItem(**properties) 59 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/locations/spiders/texas_roadhouse.py b/locations/spiders/texas_roadhouse.py --- a/locations/spiders/texas_roadhouse.py +++ b/locations/spiders/texas_roadhouse.py @@ -6,7 +6,10 @@ class TexasRoadhouseSpider(scrapy.Spider): name = "texas_roadhouse" - item_attributes = { 'brand': "Texas Roadhouse", 'brand_wikidata': "Q7707945" } + item_attributes = { + 'brand': "Texas Roadhouse", + 'brand_wikidata': "Q7707945", + } allowed_domains = ["www.texasroadhouse.com"] start_urls = ( 'https://www.texasroadhouse.com/sitemap.xml',
{"golden_diff": "diff --git a/locations/spiders/texas_roadhouse.py b/locations/spiders/texas_roadhouse.py\n--- a/locations/spiders/texas_roadhouse.py\n+++ b/locations/spiders/texas_roadhouse.py\n@@ -6,7 +6,10 @@\n \n class TexasRoadhouseSpider(scrapy.Spider):\n name = \"texas_roadhouse\"\n- item_attributes = { 'brand': \"Texas Roadhouse\", 'brand_wikidata': \"Q7707945\" }\n+ item_attributes = {\n+ 'brand': \"Texas Roadhouse\",\n+ 'brand_wikidata': \"Q7707945\",\n+ }\n allowed_domains = [\"www.texasroadhouse.com\"]\n start_urls = (\n 'https://www.texasroadhouse.com/sitemap.xml',\n", "issue": "Spider texas_roadhouse is broken\nDuring the global build at 2021-08-11-14-42-19, spider **texas_roadhouse** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/logs/texas_roadhouse.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/output/texas_roadhouse.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/output/texas_roadhouse.geojson))\n", "before_files": [{"content": "import json\nimport scrapy\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass TexasRoadhouseSpider(scrapy.Spider):\n name = \"texas_roadhouse\"\n item_attributes = { 'brand': \"Texas Roadhouse\", 'brand_wikidata': \"Q7707945\" }\n allowed_domains = [\"www.texasroadhouse.com\"]\n start_urls = (\n 'https://www.texasroadhouse.com/sitemap.xml',\n )\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n\n for weekday in store_hours:\n # convert day from full Monday to Mo, etc\n day = weekday.get('day')[:2]\n open_time = weekday.get('hours').get('openTime')\n close_time = weekday.get('hours').get('closeTime')\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%I:%M%p')\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n for path in city_urls:\n if path.startswith('https://www.texasroadhouse.com/locations/'):\n yield scrapy.Request(\n path.strip(),\n callback=self.parse_store,\n )\n\n def parse_store(self, response):\n data = json.loads(response.xpath('//script/text()').extract_first()[22:-1])\n\n properties = {\n 'lat': data['latitude'],\n 'lon': data['longitude'],\n 'ref': data['url'],\n 'name': data['name'],\n 'addr_full': data['address1'],\n 'city': data['city'],\n 'state': data['state'],\n 'postcode': data['postalCode'],\n 'country': data['countryCode'],\n 'phone': data['telephone'],\n 'website': response.urljoin(data['url']),\n 'opening_hours': self.parse_hours(data['schedule']),\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/texas_roadhouse.py"}], "after_files": [{"content": "import json\nimport scrapy\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass TexasRoadhouseSpider(scrapy.Spider):\n name = \"texas_roadhouse\"\n item_attributes = {\n 'brand': \"Texas Roadhouse\",\n 'brand_wikidata': \"Q7707945\",\n }\n allowed_domains = [\"www.texasroadhouse.com\"]\n start_urls = (\n 'https://www.texasroadhouse.com/sitemap.xml',\n )\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n\n for weekday in store_hours:\n # convert day from full Monday to Mo, etc\n day = weekday.get('day')[:2]\n open_time = weekday.get('hours').get('openTime')\n close_time = weekday.get('hours').get('closeTime')\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%I:%M%p')\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n for path in city_urls:\n if path.startswith('https://www.texasroadhouse.com/locations/'):\n yield scrapy.Request(\n path.strip(),\n callback=self.parse_store,\n )\n\n def parse_store(self, response):\n data = json.loads(response.xpath('//script/text()').extract_first()[22:-1])\n\n properties = {\n 'lat': data['latitude'],\n 'lon': data['longitude'],\n 'ref': data['url'],\n 'name': data['name'],\n 'addr_full': data['address1'],\n 'city': data['city'],\n 'state': data['state'],\n 'postcode': data['postalCode'],\n 'country': data['countryCode'],\n 'phone': data['telephone'],\n 'website': response.urljoin(data['url']),\n 'opening_hours': self.parse_hours(data['schedule']),\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/texas_roadhouse.py"}]}
1,014
183
gh_patches_debug_4587
rasdani/github-patches
git_diff
ipython__ipython-11722
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Missing ship numpy testing decorator ``` Traceback (most recent call last): File "/Users/mbussonnier/dev/cpython/test/bin/iptest", line 6, in <module> from IPython.testing.iptestcontroller import main File "/Users/mbussonnier/dev/cpython/test/lib/python3.8/site-packages/IPython/testing/iptestcontroller.py", line 23, in <module> from .iptest import ( File "/Users/mbussonnier/dev/cpython/test/lib/python3.8/site-packages/IPython/testing/iptest.py", line 40, in <module> from IPython.external.decorators import KnownFailure, dec ImportError: cannot import name 'dec' from 'IPython.external.decorators' (/Users/mbussonnier/dev/cpython/test/lib/python3.8/site-packages/IPython/external/decorators/__init__.py) ``` Seem like `dec` is not define ini out `_decorator.py` Appologies for shortness boarding a plane --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `IPython/external/decorators/__init__.py` Content: ``` 1 try: 2 from numpy.testing import * 3 from numpy.testing import dec 4 from numpy.testing.noseclasses import KnownFailure 5 except ImportError: 6 from ._decorators import * 7 try: 8 from ._numpy_testing_noseclasses import KnownFailure 9 except ImportError: 10 pass 11 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/IPython/external/decorators/__init__.py b/IPython/external/decorators/__init__.py --- a/IPython/external/decorators/__init__.py +++ b/IPython/external/decorators/__init__.py @@ -1,9 +1,7 @@ try: - from numpy.testing import * - from numpy.testing import dec - from numpy.testing.noseclasses import KnownFailure + from numpy.testing.noseclasses import KnownFailure, knownfailureif except ImportError: - from ._decorators import * + from ._decorators import knownfailureif try: from ._numpy_testing_noseclasses import KnownFailure except ImportError:
{"golden_diff": "diff --git a/IPython/external/decorators/__init__.py b/IPython/external/decorators/__init__.py\n--- a/IPython/external/decorators/__init__.py\n+++ b/IPython/external/decorators/__init__.py\n@@ -1,9 +1,7 @@\n try:\n- from numpy.testing import *\n- from numpy.testing import dec\n- from numpy.testing.noseclasses import KnownFailure\n+ from numpy.testing.noseclasses import KnownFailure, knownfailureif\n except ImportError:\n- from ._decorators import *\n+ from ._decorators import knownfailureif\n try:\n from ._numpy_testing_noseclasses import KnownFailure\n except ImportError:\n", "issue": "Missing ship numpy testing decorator\n```\r\nTraceback (most recent call last):\r\n File \"/Users/mbussonnier/dev/cpython/test/bin/iptest\", line 6, in <module>\r\n from IPython.testing.iptestcontroller import main\r\n File \"/Users/mbussonnier/dev/cpython/test/lib/python3.8/site-packages/IPython/testing/iptestcontroller.py\", line 23, in <module>\r\n from .iptest import (\r\n File \"/Users/mbussonnier/dev/cpython/test/lib/python3.8/site-packages/IPython/testing/iptest.py\", line 40, in <module>\r\n from IPython.external.decorators import KnownFailure, dec\r\nImportError: cannot import name 'dec' from 'IPython.external.decorators' (/Users/mbussonnier/dev/cpython/test/lib/python3.8/site-packages/IPython/external/decorators/__init__.py)\r\n```\r\n\r\nSeem like `dec` is not define ini out `_decorator.py` \r\n\r\n\r\nAppologies for shortness boarding a plane\n", "before_files": [{"content": "try:\n from numpy.testing import *\n from numpy.testing import dec\n from numpy.testing.noseclasses import KnownFailure\nexcept ImportError:\n from ._decorators import *\n try:\n from ._numpy_testing_noseclasses import KnownFailure\n except ImportError:\n pass\n", "path": "IPython/external/decorators/__init__.py"}], "after_files": [{"content": "try:\n from numpy.testing.noseclasses import KnownFailure, knownfailureif\nexcept ImportError:\n from ._decorators import knownfailureif\n try:\n from ._numpy_testing_noseclasses import KnownFailure\n except ImportError:\n pass\n", "path": "IPython/external/decorators/__init__.py"}]}
557
150
gh_patches_debug_17485
rasdani/github-patches
git_diff
kymatio__kymatio-183
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [MAINT] AttributeError: module 'kymatio' has no attribute '__version__' This came up in #172 PEP 396 has introduced the convention of exposing the constant `__version__` to the top-level module At the moment this is in `kymatio.version.version` (so that `version.py` can be loaded in `setup.py` without importing the whole `kymatio` module) https://www.python.org/dev/peps/pep-0396/ Will submit a PR later --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kymatio/__init__.py` Content: ``` 1 __all__ = [ 2 'Scattering3D', 3 'Scattering2D', 4 'Scattering1D' 5 ] 6 7 from .scattering2d.scattering2d import Scattering2D 8 from .scattering1d.scattering1d import Scattering1D 9 from .scattering3d.scattering3d import Scattering3D 10 11 # Make sure that DeprecationWarning within this package always gets printed 12 ### Snippet copied from sklearn.__init__ 13 import warnings 14 import re 15 warnings.filterwarnings('always', category=DeprecationWarning, 16 module=r'^{0}.*'.format(re.escape(__name__))) 17 ### End Snippet 18 19 20 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kymatio/__init__.py b/kymatio/__init__.py --- a/kymatio/__init__.py +++ b/kymatio/__init__.py @@ -1,13 +1,3 @@ -__all__ = [ - 'Scattering3D', - 'Scattering2D', - 'Scattering1D' - ] - -from .scattering2d.scattering2d import Scattering2D -from .scattering1d.scattering1d import Scattering1D -from .scattering3d.scattering3d import Scattering3D - # Make sure that DeprecationWarning within this package always gets printed ### Snippet copied from sklearn.__init__ import warnings @@ -16,4 +6,14 @@ module=r'^{0}.*'.format(re.escape(__name__))) ### End Snippet +__all__ = [ + 'Scattering1D', + 'Scattering2D', + 'Scattering3D' + ] + +from .scattering1d.scattering1d import Scattering1D +from .scattering2d.scattering2d import Scattering2D +from .scattering3d.scattering3d import Scattering3D +from .version import version as __version__
{"golden_diff": "diff --git a/kymatio/__init__.py b/kymatio/__init__.py\n--- a/kymatio/__init__.py\n+++ b/kymatio/__init__.py\n@@ -1,13 +1,3 @@\n-__all__ = [\n- 'Scattering3D',\n- 'Scattering2D',\n- 'Scattering1D'\n- ]\n-\n-from .scattering2d.scattering2d import Scattering2D\n-from .scattering1d.scattering1d import Scattering1D\n-from .scattering3d.scattering3d import Scattering3D\n-\n # Make sure that DeprecationWarning within this package always gets printed\n ### Snippet copied from sklearn.__init__\n import warnings\n@@ -16,4 +6,14 @@\n module=r'^{0}.*'.format(re.escape(__name__)))\n ### End Snippet\n \n+__all__ = [\n+ 'Scattering1D',\n+ 'Scattering2D',\n+ 'Scattering3D'\n+ ]\n+\n+from .scattering1d.scattering1d import Scattering1D\n+from .scattering2d.scattering2d import Scattering2D\n+from .scattering3d.scattering3d import Scattering3D\n \n+from .version import version as __version__\n", "issue": "[MAINT] AttributeError: module 'kymatio' has no attribute '__version__'\nThis came up in #172\r\n\r\nPEP 396 has introduced the convention of exposing the constant `__version__` to the top-level module\r\nAt the moment this is in `kymatio.version.version` (so that `version.py` can be loaded in `setup.py` without importing the whole `kymatio` module)\r\nhttps://www.python.org/dev/peps/pep-0396/\r\n\r\nWill submit a PR later\r\n\n", "before_files": [{"content": "__all__ = [\n 'Scattering3D',\n 'Scattering2D',\n 'Scattering1D'\n ]\n\nfrom .scattering2d.scattering2d import Scattering2D\nfrom .scattering1d.scattering1d import Scattering1D\nfrom .scattering3d.scattering3d import Scattering3D\n\n# Make sure that DeprecationWarning within this package always gets printed\n### Snippet copied from sklearn.__init__\nimport warnings\nimport re\nwarnings.filterwarnings('always', category=DeprecationWarning,\n module=r'^{0}.*'.format(re.escape(__name__)))\n### End Snippet\n\n\n", "path": "kymatio/__init__.py"}], "after_files": [{"content": "# Make sure that DeprecationWarning within this package always gets printed\n### Snippet copied from sklearn.__init__\nimport warnings\nimport re\nwarnings.filterwarnings('always', category=DeprecationWarning,\n module=r'^{0}.*'.format(re.escape(__name__)))\n### End Snippet\n\n__all__ = [\n 'Scattering1D',\n 'Scattering2D',\n 'Scattering3D'\n ]\n\nfrom .scattering1d.scattering1d import Scattering1D\nfrom .scattering2d.scattering2d import Scattering2D\nfrom .scattering3d.scattering3d import Scattering3D\n\nfrom .version import version as __version__\n", "path": "kymatio/__init__.py"}]}
553
289
gh_patches_debug_5270
rasdani/github-patches
git_diff
streamlit__streamlit-3949
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fix wrong Streamlit static app for st.caption() doc **Link to doc page in question (if any):** [[click here]](https://docs.streamlit.io/library/api-reference/text/st.caption) **Name of the Streamlit feature whose docs need improvement:** `st.caption()` **What you think the docs should say:** The Streamlit static app below should show how `st.caption()` works - it currently shows how `st.title()` work <img src="https://user-images.githubusercontent.com/7164864/137554438-77380317-1267-4840-9521-36f86e32b456.png" width="300px"> ➡️ Code is [here](https://github.com/streamlit/streamlit/blob/267bf98d9a8f8d94cf6d3e9f0ea8866233d4836e/lib/streamlit/elements/markdown.py#L221) but I have no idea how to replace that URL with a correct one --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lib/streamlit/elements/markdown.py` Content: ``` 1 # Copyright 2018-2021 Streamlit Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from typing import cast 16 17 import streamlit 18 from streamlit import type_util 19 from streamlit.proto.Markdown_pb2 import Markdown as MarkdownProto 20 from .utils import clean_text 21 22 23 class MarkdownMixin: 24 def markdown(self, body, unsafe_allow_html=False): 25 """Display string formatted as Markdown. 26 27 Parameters 28 ---------- 29 body : str 30 The string to display as Github-flavored Markdown. Syntax 31 information can be found at: https://github.github.com/gfm. 32 33 This also supports: 34 35 * Emoji shortcodes, such as `:+1:` and `:sunglasses:`. 36 For a list of all supported codes, 37 see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json. 38 39 * LaTeX expressions, by wrapping them in "$" or "$$" (the "$$" 40 must be on their own lines). Supported LaTeX functions are listed 41 at https://katex.org/docs/supported.html. 42 43 unsafe_allow_html : bool 44 By default, any HTML tags found in the body will be escaped and 45 therefore treated as pure text. This behavior may be turned off by 46 setting this argument to True. 47 48 That said, we *strongly advise against it*. It is hard to write 49 secure HTML, so by using this argument you may be compromising your 50 users' security. For more information, see: 51 52 https://github.com/streamlit/streamlit/issues/152 53 54 *Also note that `unsafe_allow_html` is a temporary measure and may 55 be removed from Streamlit at any time.* 56 57 If you decide to turn on HTML anyway, we ask you to please tell us 58 your exact use case here: 59 60 https://discuss.streamlit.io/t/96 61 62 This will help us come up with safe APIs that allow you to do what 63 you want. 64 65 Example 66 ------- 67 >>> st.markdown('Streamlit is **_really_ cool**.') 68 69 .. output:: 70 https://static.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS 71 height: 50px 72 73 """ 74 markdown_proto = MarkdownProto() 75 76 markdown_proto.body = clean_text(body) 77 markdown_proto.allow_html = unsafe_allow_html 78 79 return self.dg._enqueue("markdown", markdown_proto) 80 81 def header(self, body, anchor=None): 82 """Display text in header formatting. 83 84 Parameters 85 ---------- 86 body : str 87 The text to display. 88 89 anchor : str 90 The anchor name of the header that can be accessed with #anchor 91 in the URL. If omitted, it generates an anchor using the body. 92 93 Example 94 ------- 95 >>> st.header('This is a header') 96 97 .. output:: 98 https://static.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj 99 height: 100px 100 101 """ 102 header_proto = MarkdownProto() 103 if anchor is None: 104 header_proto.body = f"## {clean_text(body)}" 105 else: 106 header_proto.body = f'<h2 data-anchor="{anchor}">{clean_text(body)}</h2>' 107 header_proto.allow_html = True 108 return self.dg._enqueue("markdown", header_proto) 109 110 def subheader(self, body, anchor=None): 111 """Display text in subheader formatting. 112 113 Parameters 114 ---------- 115 body : str 116 The text to display. 117 118 anchor : str 119 The anchor name of the header that can be accessed with #anchor 120 in the URL. If omitted, it generates an anchor using the body. 121 122 Example 123 ------- 124 >>> st.subheader('This is a subheader') 125 126 .. output:: 127 https://static.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ 128 height: 100px 129 130 """ 131 subheader_proto = MarkdownProto() 132 if anchor is None: 133 subheader_proto.body = f"### {clean_text(body)}" 134 else: 135 subheader_proto.body = f'<h3 data-anchor="{anchor}">{clean_text(body)}</h3>' 136 subheader_proto.allow_html = True 137 138 return self.dg._enqueue("markdown", subheader_proto) 139 140 def code(self, body, language="python"): 141 """Display a code block with optional syntax highlighting. 142 143 (This is a convenience wrapper around `st.markdown()`) 144 145 Parameters 146 ---------- 147 body : str 148 The string to display as code. 149 150 language : str 151 The language that the code is written in, for syntax highlighting. 152 If omitted, the code will be unstyled. 153 154 Example 155 ------- 156 >>> code = '''def hello(): 157 ... print("Hello, Streamlit!")''' 158 >>> st.code(code, language='python') 159 160 .. output:: 161 https://static.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2 162 height: 100px 163 164 """ 165 code_proto = MarkdownProto() 166 markdown = "```%(language)s\n%(body)s\n```" % { 167 "language": language or "", 168 "body": body, 169 } 170 code_proto.body = clean_text(markdown) 171 return self.dg._enqueue("markdown", code_proto) 172 173 def title(self, body, anchor=None): 174 """Display text in title formatting. 175 176 Each document should have a single `st.title()`, although this is not 177 enforced. 178 179 Parameters 180 ---------- 181 body : str 182 The text to display. 183 184 anchor : str 185 The anchor name of the header that can be accessed with #anchor 186 in the URL. If omitted, it generates an anchor using the body. 187 188 Example 189 ------- 190 >>> st.title('This is a title') 191 192 .. output:: 193 https://static.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj 194 height: 100px 195 196 """ 197 title_proto = MarkdownProto() 198 if anchor is None: 199 title_proto.body = f"# {clean_text(body)}" 200 else: 201 title_proto.body = f'<h1 data-anchor="{anchor}">{clean_text(body)}</h1>' 202 title_proto.allow_html = True 203 return self.dg._enqueue("markdown", title_proto) 204 205 def caption(self, body): 206 """Display text in small font. 207 208 This should be used for captions, asides, footnotes, sidenotes, and 209 other explanatory text. 210 211 Parameters 212 ---------- 213 body : str 214 The text to display. 215 216 Example 217 ------- 218 >>> st.caption('This is a string that explains something above.') 219 220 .. output:: 221 https://static.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj 222 height: 100px 223 224 """ 225 caption_proto = MarkdownProto() 226 caption_proto.body = body 227 caption_proto.allow_html = False 228 caption_proto.is_caption = True 229 return self.dg._enqueue("markdown", caption_proto) 230 231 def latex(self, body): 232 # This docstring needs to be "raw" because of the backslashes in the 233 # example below. 234 r"""Display mathematical expressions formatted as LaTeX. 235 236 Supported LaTeX functions are listed at 237 https://katex.org/docs/supported.html. 238 239 Parameters 240 ---------- 241 body : str or SymPy expression 242 The string or SymPy expression to display as LaTeX. If str, it's 243 a good idea to use raw Python strings since LaTeX uses backslashes 244 a lot. 245 246 247 Example 248 ------- 249 >>> st.latex(r''' 250 ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} = 251 ... \sum_{k=0}^{n-1} ar^k = 252 ... a \left(\frac{1-r^{n}}{1-r}\right) 253 ... ''') 254 255 .. output:: 256 https://static.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4 257 height: 75px 258 259 """ 260 if type_util.is_sympy_expession(body): 261 import sympy 262 263 body = sympy.latex(body) 264 265 latex_proto = MarkdownProto() 266 latex_proto.body = "$$\n%s\n$$" % clean_text(body) 267 return self.dg._enqueue("markdown", latex_proto) 268 269 @property 270 def dg(self) -> "streamlit.delta_generator.DeltaGenerator": 271 """Get our DeltaGenerator.""" 272 return cast("streamlit.delta_generator.DeltaGenerator", self) 273 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/streamlit/elements/markdown.py b/lib/streamlit/elements/markdown.py --- a/lib/streamlit/elements/markdown.py +++ b/lib/streamlit/elements/markdown.py @@ -218,8 +218,8 @@ >>> st.caption('This is a string that explains something above.') .. output:: - https://static.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj - height: 100px + https://static.streamlit.io/1.1.0-eQCi/index.html?id=SVQb16b2UDZX4W8VLkEJLJ + height: 175px """ caption_proto = MarkdownProto()
{"golden_diff": "diff --git a/lib/streamlit/elements/markdown.py b/lib/streamlit/elements/markdown.py\n--- a/lib/streamlit/elements/markdown.py\n+++ b/lib/streamlit/elements/markdown.py\n@@ -218,8 +218,8 @@\n >>> st.caption('This is a string that explains something above.')\n \n .. output::\n- https://static.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj\n- height: 100px\n+ https://static.streamlit.io/1.1.0-eQCi/index.html?id=SVQb16b2UDZX4W8VLkEJLJ\n+ height: 175px\n \n \"\"\"\n caption_proto = MarkdownProto()\n", "issue": "Fix wrong Streamlit static app for st.caption() doc\n**Link to doc page in question (if any):**\r\n\r\n[[click here]](https://docs.streamlit.io/library/api-reference/text/st.caption)\r\n\r\n**Name of the Streamlit feature whose docs need improvement:**\r\n`st.caption()`\r\n\r\n**What you think the docs should say:**\r\nThe Streamlit static app below should show how `st.caption()` works - it currently shows how `st.title()` work\r\n\r\n<img src=\"https://user-images.githubusercontent.com/7164864/137554438-77380317-1267-4840-9521-36f86e32b456.png\" width=\"300px\">\r\n\r\n\u27a1\ufe0f Code is [here](https://github.com/streamlit/streamlit/blob/267bf98d9a8f8d94cf6d3e9f0ea8866233d4836e/lib/streamlit/elements/markdown.py#L221) but I have no idea how to replace that URL with a correct one\n", "before_files": [{"content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import cast\n\nimport streamlit\nfrom streamlit import type_util\nfrom streamlit.proto.Markdown_pb2 import Markdown as MarkdownProto\nfrom .utils import clean_text\n\n\nclass MarkdownMixin:\n def markdown(self, body, unsafe_allow_html=False):\n \"\"\"Display string formatted as Markdown.\n\n Parameters\n ----------\n body : str\n The string to display as Github-flavored Markdown. Syntax\n information can be found at: https://github.github.com/gfm.\n\n This also supports:\n\n * Emoji shortcodes, such as `:+1:` and `:sunglasses:`.\n For a list of all supported codes,\n see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json.\n\n * LaTeX expressions, by wrapping them in \"$\" or \"$$\" (the \"$$\"\n must be on their own lines). Supported LaTeX functions are listed\n at https://katex.org/docs/supported.html.\n\n unsafe_allow_html : bool\n By default, any HTML tags found in the body will be escaped and\n therefore treated as pure text. This behavior may be turned off by\n setting this argument to True.\n\n That said, we *strongly advise against it*. It is hard to write\n secure HTML, so by using this argument you may be compromising your\n users' security. For more information, see:\n\n https://github.com/streamlit/streamlit/issues/152\n\n *Also note that `unsafe_allow_html` is a temporary measure and may\n be removed from Streamlit at any time.*\n\n If you decide to turn on HTML anyway, we ask you to please tell us\n your exact use case here:\n\n https://discuss.streamlit.io/t/96\n\n This will help us come up with safe APIs that allow you to do what\n you want.\n\n Example\n -------\n >>> st.markdown('Streamlit is **_really_ cool**.')\n\n .. output::\n https://static.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS\n height: 50px\n\n \"\"\"\n markdown_proto = MarkdownProto()\n\n markdown_proto.body = clean_text(body)\n markdown_proto.allow_html = unsafe_allow_html\n\n return self.dg._enqueue(\"markdown\", markdown_proto)\n\n def header(self, body, anchor=None):\n \"\"\"Display text in header formatting.\n\n Parameters\n ----------\n body : str\n The text to display.\n\n anchor : str\n The anchor name of the header that can be accessed with #anchor\n in the URL. If omitted, it generates an anchor using the body.\n\n Example\n -------\n >>> st.header('This is a header')\n\n .. output::\n https://static.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj\n height: 100px\n\n \"\"\"\n header_proto = MarkdownProto()\n if anchor is None:\n header_proto.body = f\"## {clean_text(body)}\"\n else:\n header_proto.body = f'<h2 data-anchor=\"{anchor}\">{clean_text(body)}</h2>'\n header_proto.allow_html = True\n return self.dg._enqueue(\"markdown\", header_proto)\n\n def subheader(self, body, anchor=None):\n \"\"\"Display text in subheader formatting.\n\n Parameters\n ----------\n body : str\n The text to display.\n\n anchor : str\n The anchor name of the header that can be accessed with #anchor\n in the URL. If omitted, it generates an anchor using the body.\n\n Example\n -------\n >>> st.subheader('This is a subheader')\n\n .. output::\n https://static.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ\n height: 100px\n\n \"\"\"\n subheader_proto = MarkdownProto()\n if anchor is None:\n subheader_proto.body = f\"### {clean_text(body)}\"\n else:\n subheader_proto.body = f'<h3 data-anchor=\"{anchor}\">{clean_text(body)}</h3>'\n subheader_proto.allow_html = True\n\n return self.dg._enqueue(\"markdown\", subheader_proto)\n\n def code(self, body, language=\"python\"):\n \"\"\"Display a code block with optional syntax highlighting.\n\n (This is a convenience wrapper around `st.markdown()`)\n\n Parameters\n ----------\n body : str\n The string to display as code.\n\n language : str\n The language that the code is written in, for syntax highlighting.\n If omitted, the code will be unstyled.\n\n Example\n -------\n >>> code = '''def hello():\n ... print(\"Hello, Streamlit!\")'''\n >>> st.code(code, language='python')\n\n .. output::\n https://static.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2\n height: 100px\n\n \"\"\"\n code_proto = MarkdownProto()\n markdown = \"```%(language)s\\n%(body)s\\n```\" % {\n \"language\": language or \"\",\n \"body\": body,\n }\n code_proto.body = clean_text(markdown)\n return self.dg._enqueue(\"markdown\", code_proto)\n\n def title(self, body, anchor=None):\n \"\"\"Display text in title formatting.\n\n Each document should have a single `st.title()`, although this is not\n enforced.\n\n Parameters\n ----------\n body : str\n The text to display.\n\n anchor : str\n The anchor name of the header that can be accessed with #anchor\n in the URL. If omitted, it generates an anchor using the body.\n\n Example\n -------\n >>> st.title('This is a title')\n\n .. output::\n https://static.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj\n height: 100px\n\n \"\"\"\n title_proto = MarkdownProto()\n if anchor is None:\n title_proto.body = f\"# {clean_text(body)}\"\n else:\n title_proto.body = f'<h1 data-anchor=\"{anchor}\">{clean_text(body)}</h1>'\n title_proto.allow_html = True\n return self.dg._enqueue(\"markdown\", title_proto)\n\n def caption(self, body):\n \"\"\"Display text in small font.\n\n This should be used for captions, asides, footnotes, sidenotes, and\n other explanatory text.\n\n Parameters\n ----------\n body : str\n The text to display.\n\n Example\n -------\n >>> st.caption('This is a string that explains something above.')\n\n .. output::\n https://static.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj\n height: 100px\n\n \"\"\"\n caption_proto = MarkdownProto()\n caption_proto.body = body\n caption_proto.allow_html = False\n caption_proto.is_caption = True\n return self.dg._enqueue(\"markdown\", caption_proto)\n\n def latex(self, body):\n # This docstring needs to be \"raw\" because of the backslashes in the\n # example below.\n r\"\"\"Display mathematical expressions formatted as LaTeX.\n\n Supported LaTeX functions are listed at\n https://katex.org/docs/supported.html.\n\n Parameters\n ----------\n body : str or SymPy expression\n The string or SymPy expression to display as LaTeX. If str, it's\n a good idea to use raw Python strings since LaTeX uses backslashes\n a lot.\n\n\n Example\n -------\n >>> st.latex(r'''\n ... a + ar + a r^2 + a r^3 + \\cdots + a r^{n-1} =\n ... \\sum_{k=0}^{n-1} ar^k =\n ... a \\left(\\frac{1-r^{n}}{1-r}\\right)\n ... ''')\n\n .. output::\n https://static.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4\n height: 75px\n\n \"\"\"\n if type_util.is_sympy_expession(body):\n import sympy\n\n body = sympy.latex(body)\n\n latex_proto = MarkdownProto()\n latex_proto.body = \"$$\\n%s\\n$$\" % clean_text(body)\n return self.dg._enqueue(\"markdown\", latex_proto)\n\n @property\n def dg(self) -> \"streamlit.delta_generator.DeltaGenerator\":\n \"\"\"Get our DeltaGenerator.\"\"\"\n return cast(\"streamlit.delta_generator.DeltaGenerator\", self)\n", "path": "lib/streamlit/elements/markdown.py"}], "after_files": [{"content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import cast\n\nimport streamlit\nfrom streamlit import type_util\nfrom streamlit.proto.Markdown_pb2 import Markdown as MarkdownProto\nfrom .utils import clean_text\n\n\nclass MarkdownMixin:\n def markdown(self, body, unsafe_allow_html=False):\n \"\"\"Display string formatted as Markdown.\n\n Parameters\n ----------\n body : str\n The string to display as Github-flavored Markdown. Syntax\n information can be found at: https://github.github.com/gfm.\n\n This also supports:\n\n * Emoji shortcodes, such as `:+1:` and `:sunglasses:`.\n For a list of all supported codes,\n see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json.\n\n * LaTeX expressions, by wrapping them in \"$\" or \"$$\" (the \"$$\"\n must be on their own lines). Supported LaTeX functions are listed\n at https://katex.org/docs/supported.html.\n\n unsafe_allow_html : bool\n By default, any HTML tags found in the body will be escaped and\n therefore treated as pure text. This behavior may be turned off by\n setting this argument to True.\n\n That said, we *strongly advise against it*. It is hard to write\n secure HTML, so by using this argument you may be compromising your\n users' security. For more information, see:\n\n https://github.com/streamlit/streamlit/issues/152\n\n *Also note that `unsafe_allow_html` is a temporary measure and may\n be removed from Streamlit at any time.*\n\n If you decide to turn on HTML anyway, we ask you to please tell us\n your exact use case here:\n\n https://discuss.streamlit.io/t/96\n\n This will help us come up with safe APIs that allow you to do what\n you want.\n\n Example\n -------\n >>> st.markdown('Streamlit is **_really_ cool**.')\n\n .. output::\n https://static.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS\n height: 50px\n\n \"\"\"\n markdown_proto = MarkdownProto()\n\n markdown_proto.body = clean_text(body)\n markdown_proto.allow_html = unsafe_allow_html\n\n return self.dg._enqueue(\"markdown\", markdown_proto)\n\n def header(self, body, anchor=None):\n \"\"\"Display text in header formatting.\n\n Parameters\n ----------\n body : str\n The text to display.\n\n anchor : str\n The anchor name of the header that can be accessed with #anchor\n in the URL. If omitted, it generates an anchor using the body.\n\n Example\n -------\n >>> st.header('This is a header')\n\n .. output::\n https://static.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj\n height: 100px\n\n \"\"\"\n header_proto = MarkdownProto()\n if anchor is None:\n header_proto.body = f\"## {clean_text(body)}\"\n else:\n header_proto.body = f'<h2 data-anchor=\"{anchor}\">{clean_text(body)}</h2>'\n header_proto.allow_html = True\n return self.dg._enqueue(\"markdown\", header_proto)\n\n def subheader(self, body, anchor=None):\n \"\"\"Display text in subheader formatting.\n\n Parameters\n ----------\n body : str\n The text to display.\n\n anchor : str\n The anchor name of the header that can be accessed with #anchor\n in the URL. If omitted, it generates an anchor using the body.\n\n Example\n -------\n >>> st.subheader('This is a subheader')\n\n .. output::\n https://static.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ\n height: 100px\n\n \"\"\"\n subheader_proto = MarkdownProto()\n if anchor is None:\n subheader_proto.body = f\"### {clean_text(body)}\"\n else:\n subheader_proto.body = f'<h3 data-anchor=\"{anchor}\">{clean_text(body)}</h3>'\n subheader_proto.allow_html = True\n\n return self.dg._enqueue(\"markdown\", subheader_proto)\n\n def code(self, body, language=\"python\"):\n \"\"\"Display a code block with optional syntax highlighting.\n\n (This is a convenience wrapper around `st.markdown()`)\n\n Parameters\n ----------\n body : str\n The string to display as code.\n\n language : str\n The language that the code is written in, for syntax highlighting.\n If omitted, the code will be unstyled.\n\n Example\n -------\n >>> code = '''def hello():\n ... print(\"Hello, Streamlit!\")'''\n >>> st.code(code, language='python')\n\n .. output::\n https://static.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2\n height: 100px\n\n \"\"\"\n code_proto = MarkdownProto()\n markdown = \"```%(language)s\\n%(body)s\\n```\" % {\n \"language\": language or \"\",\n \"body\": body,\n }\n code_proto.body = clean_text(markdown)\n return self.dg._enqueue(\"markdown\", code_proto)\n\n def title(self, body, anchor=None):\n \"\"\"Display text in title formatting.\n\n Each document should have a single `st.title()`, although this is not\n enforced.\n\n Parameters\n ----------\n body : str\n The text to display.\n\n anchor : str\n The anchor name of the header that can be accessed with #anchor\n in the URL. If omitted, it generates an anchor using the body.\n\n Example\n -------\n >>> st.title('This is a title')\n\n .. output::\n https://static.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj\n height: 100px\n\n \"\"\"\n title_proto = MarkdownProto()\n if anchor is None:\n title_proto.body = f\"# {clean_text(body)}\"\n else:\n title_proto.body = f'<h1 data-anchor=\"{anchor}\">{clean_text(body)}</h1>'\n title_proto.allow_html = True\n return self.dg._enqueue(\"markdown\", title_proto)\n\n def caption(self, body):\n \"\"\"Display text in small font.\n\n This should be used for captions, asides, footnotes, sidenotes, and\n other explanatory text.\n\n Parameters\n ----------\n body : str\n The text to display.\n\n Example\n -------\n >>> st.caption('This is a string that explains something above.')\n\n .. output::\n https://static.streamlit.io/1.1.0-eQCi/index.html?id=SVQb16b2UDZX4W8VLkEJLJ\n height: 175px\n\n \"\"\"\n caption_proto = MarkdownProto()\n caption_proto.body = body\n caption_proto.allow_html = False\n caption_proto.is_caption = True\n return self.dg._enqueue(\"markdown\", caption_proto)\n\n def latex(self, body):\n # This docstring needs to be \"raw\" because of the backslashes in the\n # example below.\n r\"\"\"Display mathematical expressions formatted as LaTeX.\n\n Supported LaTeX functions are listed at\n https://katex.org/docs/supported.html.\n\n Parameters\n ----------\n body : str or SymPy expression\n The string or SymPy expression to display as LaTeX. If str, it's\n a good idea to use raw Python strings since LaTeX uses backslashes\n a lot.\n\n\n Example\n -------\n >>> st.latex(r'''\n ... a + ar + a r^2 + a r^3 + \\cdots + a r^{n-1} =\n ... \\sum_{k=0}^{n-1} ar^k =\n ... a \\left(\\frac{1-r^{n}}{1-r}\\right)\n ... ''')\n\n .. output::\n https://static.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4\n height: 75px\n\n \"\"\"\n if type_util.is_sympy_expession(body):\n import sympy\n\n body = sympy.latex(body)\n\n latex_proto = MarkdownProto()\n latex_proto.body = \"$$\\n%s\\n$$\" % clean_text(body)\n return self.dg._enqueue(\"markdown\", latex_proto)\n\n @property\n def dg(self) -> \"streamlit.delta_generator.DeltaGenerator\":\n \"\"\"Get our DeltaGenerator.\"\"\"\n return cast(\"streamlit.delta_generator.DeltaGenerator\", self)\n", "path": "lib/streamlit/elements/markdown.py"}]}
3,349
187
gh_patches_debug_32148
rasdani/github-patches
git_diff
learningequality__kolibri-11937
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Enhance Quiz- Side panel doesn't show on mobile ## Overview When I was explaining a certain piece of behavior in the select resource component to @akolson . I discovered that when you want to add resource while on mobile the side panel does not appear at all. or when it's loaded and the page change to mobile view everything get's distorted. Uploading Screen Recording 2024-02-01 at 16.37.15.mov… improve exam preview layout when there are many questions in a quiz ### Observed behavior see comments in https://docs.google.com/document/d/1QeRghWCFdJgWV627DaIWRM80pJ3IkJr_TRQjI4uJXEw/edit#heading=h.kj33sjr8m9b5 ### Expected behavior don't scroll exercise preview offscreen ### User-facing consequences extra scrolling when quizzes have many questions ### Steps to reproduce create a quiz with 50 questions and then try to rearrange them ### Context 0.12 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kolibri/plugins/learn/viewsets.py` Content: ``` 1 from django.db.models import Count 2 from django.db.models import OuterRef 3 from django.db.models import Q 4 from django.db.models import Subquery 5 from django.db.models import Sum 6 from django.db.models.fields import IntegerField 7 from rest_framework.permissions import IsAuthenticated 8 from rest_framework.response import Response 9 from rest_framework.views import APIView 10 11 from kolibri.core.api import ReadOnlyValuesViewset 12 from kolibri.core.auth.models import Classroom 13 from kolibri.core.auth.models import Facility 14 from kolibri.core.content.api import ContentNodeProgressViewset 15 from kolibri.core.content.api import ContentNodeViewset 16 from kolibri.core.content.api import UserContentNodeViewset 17 from kolibri.core.content.models import ContentNode 18 from kolibri.core.exams.models import Exam 19 from kolibri.core.lessons.models import Lesson 20 from kolibri.core.logger.models import AttemptLog 21 from kolibri.core.logger.models import MasteryLog 22 23 24 contentnode_progress_viewset = ContentNodeProgressViewset() 25 contentnode_viewset = ContentNodeViewset() 26 user_contentnode_viewset = UserContentNodeViewset() 27 28 29 class LearnStateView(APIView): 30 def get(self, request, format=None): 31 """ 32 Returns some configuration variables applicable to users navigating learn. 33 - in_classes: Whether the user is in any classes 34 - can_download_externally: Whether the user can download content externally from Kolibri 35 """ 36 if request.user.is_anonymous: 37 default_facility = Facility.get_default_facility() 38 can_download_externally = ( 39 default_facility.dataset.show_download_button_in_learn 40 if default_facility 41 else True 42 ) 43 return Response( 44 { 45 "in_classes": False, 46 "can_download_externally": can_download_externally, 47 } 48 ) 49 return Response( 50 { 51 "in_classes": request.user.memberships.exists(), 52 "can_download_externally": request.user.dataset.show_download_button_in_learn, 53 } 54 ) 55 56 57 def _consolidate_lessons_data(request, lessons): 58 lesson_contentnode_ids = set() 59 for lesson in lessons: 60 lesson_contentnode_ids |= { 61 resource["contentnode_id"] for resource in lesson["resources"] 62 } 63 64 contentnode_progress = ( 65 contentnode_progress_viewset.serialize_list( 66 request, {"ids": lesson_contentnode_ids} 67 ) 68 if lesson_contentnode_ids 69 else [] 70 ) 71 72 contentnodes = ( 73 contentnode_viewset.serialize_list(request, {"ids": lesson_contentnode_ids}) 74 if lesson_contentnode_ids 75 else [] 76 ) 77 78 progress_map = {l["content_id"]: l["progress"] for l in contentnode_progress} 79 80 contentnode_map = {c["id"]: c for c in contentnodes} 81 82 for lesson in lessons: 83 lesson["progress"] = { 84 "resource_progress": sum( 85 ( 86 progress_map[resource["content_id"]] 87 for resource in lesson["resources"] 88 if resource["content_id"] in progress_map 89 ) 90 ), 91 "total_resources": len(lesson["resources"]), 92 } 93 missing_resource = False 94 for resource in lesson["resources"]: 95 resource["progress"] = progress_map.get(resource["content_id"], 0) 96 resource["contentnode"] = contentnode_map.get( 97 resource["contentnode_id"], None 98 ) 99 missing_resource = missing_resource or not resource["contentnode"] 100 lesson["missing_resource"] = missing_resource 101 102 103 class LearnerClassroomViewset(ReadOnlyValuesViewset): 104 """ 105 Returns all Classrooms for which the requesting User is a member, 106 along with all associated assignments. 107 """ 108 109 permission_classes = (IsAuthenticated,) 110 111 values = ("id", "name") 112 113 def get_queryset(self): 114 if self.request.user.is_anonymous: 115 return Classroom.objects.none() 116 return Classroom.objects.filter(membership__user=self.request.user) 117 118 def consolidate(self, items, queryset): 119 if not items: 120 return items 121 lessons = ( 122 Lesson.objects.filter( 123 lesson_assignments__collection__membership__user=self.request.user, 124 is_active=True, 125 collection__in=(c["id"] for c in items), 126 ) 127 .distinct() 128 .values( 129 "description", "id", "is_active", "title", "resources", "collection" 130 ) 131 ) 132 _consolidate_lessons_data(self.request, lessons) 133 134 user_masterylog_content_ids = MasteryLog.objects.filter( 135 user=self.request.user 136 ).values("summarylog__content_id") 137 138 exams = ( 139 Exam.objects.filter( 140 assignments__collection__membership__user=self.request.user, 141 collection__in=(c["id"] for c in items), 142 ) 143 .filter(Q(active=True) | Q(id__in=user_masterylog_content_ids)) 144 .annotate( 145 closed=Subquery( 146 MasteryLog.objects.filter( 147 summarylog__content_id=OuterRef("id"), user=self.request.user 148 ).values("complete")[:1] 149 ), 150 score=Subquery( 151 AttemptLog.objects.filter( 152 sessionlog__content_id=OuterRef("id"), user=self.request.user 153 ) 154 .order_by() 155 .values_list("item") 156 .distinct() 157 .values("masterylog") 158 .annotate(total_correct=Sum("correct")) 159 .values("total_correct"), 160 output_field=IntegerField(), 161 ), 162 answer_count=Subquery( 163 AttemptLog.objects.filter( 164 sessionlog__content_id=OuterRef("id"), user=self.request.user 165 ) 166 .order_by() 167 .values_list("item") 168 .distinct() 169 .values("masterylog") 170 .annotate(total_complete=Count("id")) 171 .values("total_complete"), 172 output_field=IntegerField(), 173 ), 174 ) 175 .distinct() 176 .values( 177 "collection", 178 "active", 179 "archive", 180 "id", 181 "question_count", 182 "title", 183 "closed", 184 "answer_count", 185 "score", 186 "question_sources", 187 ) 188 ) 189 exam_node_ids = set() 190 for exam in exams: 191 exam_node_ids |= { 192 question["exercise_id"] for question in exam.get("question_sources") 193 } 194 195 available_exam_ids = set( 196 ContentNode.objects.filter_by_uuids(exam_node_ids).values_list( 197 "id", flat=True 198 ) 199 ) 200 201 for exam in exams: 202 closed = exam.pop("closed") 203 score = exam.pop("score") 204 answer_count = exam.pop("answer_count") 205 if closed is not None: 206 exam["progress"] = { 207 "closed": closed, 208 "score": score, 209 "answer_count": answer_count, 210 "started": True, 211 } 212 else: 213 exam["progress"] = { 214 "score": None, 215 "answer_count": None, 216 "closed": None, 217 "started": False, 218 } 219 exam["missing_resource"] = any( 220 question["exercise_id"] not in available_exam_ids 221 for question in exam.get("question_sources") 222 ) 223 out_items = [] 224 for item in items: 225 item["assignments"] = { 226 "exams": [exam for exam in exams if exam["collection"] == item["id"]], 227 "lessons": [ 228 lesson for lesson in lessons if lesson["collection"] == item["id"] 229 ], 230 } 231 out_items.append(item) 232 return out_items 233 234 235 learner_classroom_viewset = LearnerClassroomViewset() 236 237 238 def _resumable_resources(classrooms): 239 for classroom in classrooms: 240 for lesson in classroom["assignments"]["lessons"]: 241 for resource in lesson["resources"]: 242 yield 0 < resource["progress"] < 1 243 244 245 class LearnHomePageHydrationView(APIView): 246 def get(self, request, format=None): 247 classrooms = [] 248 resumable_resources = [] 249 resumable_resources_progress = [] 250 if not request.user.is_anonymous: 251 classrooms = learner_classroom_viewset.serialize_list(request) 252 if not classrooms or not any(_resumable_resources(classrooms)): 253 resumable_resources = user_contentnode_viewset.serialize_list( 254 request, 255 {"resume": True, "max_results": 12, "ordering": "-last_interacted"}, 256 ) 257 resumable_resources_progress = ( 258 contentnode_progress_viewset.serialize_list( 259 request, 260 { 261 "resume": True, 262 "max_results": 12, 263 "ordering": "-last_interacted", 264 }, 265 ) 266 ) 267 268 return Response( 269 { 270 "classrooms": classrooms, 271 "resumable_resources": resumable_resources, 272 "resumable_resources_progress": resumable_resources_progress, 273 } 274 ) 275 276 277 def _map_lesson_classroom(item): 278 return { 279 "id": item.pop("collection__id"), 280 "name": item.pop("collection__name"), 281 "parent": item.pop("collection__parent_id"), 282 } 283 284 285 class LearnerLessonViewset(ReadOnlyValuesViewset): 286 """ 287 Special Viewset for Learners to view Lessons to which they are assigned. 288 The core Lesson Viewset is locked down to Admin users only. 289 """ 290 291 permission_classes = (IsAuthenticated,) 292 293 values = ( 294 "id", 295 "title", 296 "description", 297 "resources", 298 "is_active", 299 "collection", 300 "collection__id", 301 "collection__name", 302 "collection__parent_id", 303 ) 304 305 field_map = {"classroom": _map_lesson_classroom} 306 307 def get_queryset(self): 308 if self.request.user.is_anonymous: 309 return Lesson.objects.none() 310 return Lesson.objects.filter( 311 lesson_assignments__collection__membership__user=self.request.user, 312 is_active=True, 313 ) 314 315 def consolidate(self, items, queryset): 316 if not items: 317 return items 318 319 _consolidate_lessons_data(self.request, items) 320 321 return items 322 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kolibri/plugins/learn/viewsets.py b/kolibri/plugins/learn/viewsets.py --- a/kolibri/plugins/learn/viewsets.py +++ b/kolibri/plugins/learn/viewsets.py @@ -16,6 +16,7 @@ from kolibri.core.content.api import UserContentNodeViewset from kolibri.core.content.models import ContentNode from kolibri.core.exams.models import Exam +from kolibri.core.exams.models import exam_assignment_lookup from kolibri.core.lessons.models import Lesson from kolibri.core.logger.models import AttemptLog from kolibri.core.logger.models import MasteryLog @@ -182,14 +183,19 @@ "title", "closed", "answer_count", + "data_model_version", "score", "question_sources", ) ) exam_node_ids = set() + for exam in exams: exam_node_ids |= { - question["exercise_id"] for question in exam.get("question_sources") + exercise_id + for exercise_id, _ in exam_assignment_lookup( + exam.get("question_sources", []) + ) } available_exam_ids = set( @@ -217,8 +223,10 @@ "started": False, } exam["missing_resource"] = any( - question["exercise_id"] not in available_exam_ids - for question in exam.get("question_sources") + exercise_id not in available_exam_ids + for exercise_id, _ in exam_assignment_lookup( + exam.get("question_sources", []) + ) ) out_items = [] for item in items:
{"golden_diff": "diff --git a/kolibri/plugins/learn/viewsets.py b/kolibri/plugins/learn/viewsets.py\n--- a/kolibri/plugins/learn/viewsets.py\n+++ b/kolibri/plugins/learn/viewsets.py\n@@ -16,6 +16,7 @@\n from kolibri.core.content.api import UserContentNodeViewset\n from kolibri.core.content.models import ContentNode\n from kolibri.core.exams.models import Exam\n+from kolibri.core.exams.models import exam_assignment_lookup\n from kolibri.core.lessons.models import Lesson\n from kolibri.core.logger.models import AttemptLog\n from kolibri.core.logger.models import MasteryLog\n@@ -182,14 +183,19 @@\n \"title\",\n \"closed\",\n \"answer_count\",\n+ \"data_model_version\",\n \"score\",\n \"question_sources\",\n )\n )\n exam_node_ids = set()\n+\n for exam in exams:\n exam_node_ids |= {\n- question[\"exercise_id\"] for question in exam.get(\"question_sources\")\n+ exercise_id\n+ for exercise_id, _ in exam_assignment_lookup(\n+ exam.get(\"question_sources\", [])\n+ )\n }\n \n available_exam_ids = set(\n@@ -217,8 +223,10 @@\n \"started\": False,\n }\n exam[\"missing_resource\"] = any(\n- question[\"exercise_id\"] not in available_exam_ids\n- for question in exam.get(\"question_sources\")\n+ exercise_id not in available_exam_ids\n+ for exercise_id, _ in exam_assignment_lookup(\n+ exam.get(\"question_sources\", [])\n+ )\n )\n out_items = []\n for item in items:\n", "issue": "Enhance Quiz- Side panel doesn't show on mobile\n\r\n## Overview\r\n\r\nWhen I was explaining a certain piece of behavior in the select resource component to @akolson . I discovered that when you want to add resource while on mobile the side panel does not appear at all. or when it's loaded and the page change to mobile view everything get's distorted.\r\n\r\n\r\n\r\nUploading Screen Recording 2024-02-01 at 16.37.15.mov\u2026\r\n\r\n\r\n\nimprove exam preview layout when there are many questions in a quiz\n\r\n### Observed behavior\r\n\r\nsee comments in https://docs.google.com/document/d/1QeRghWCFdJgWV627DaIWRM80pJ3IkJr_TRQjI4uJXEw/edit#heading=h.kj33sjr8m9b5\r\n\r\n### Expected behavior\r\n\r\ndon't scroll exercise preview offscreen\r\n\r\n### User-facing consequences\r\n\r\nextra scrolling when quizzes have many questions\r\n\r\n\r\n### Steps to reproduce\r\n\r\ncreate a quiz with 50 questions and then try to rearrange them \r\n\r\n### Context\r\n\r\n0.12\n", "before_files": [{"content": "from django.db.models import Count\nfrom django.db.models import OuterRef\nfrom django.db.models import Q\nfrom django.db.models import Subquery\nfrom django.db.models import Sum\nfrom django.db.models.fields import IntegerField\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom kolibri.core.api import ReadOnlyValuesViewset\nfrom kolibri.core.auth.models import Classroom\nfrom kolibri.core.auth.models import Facility\nfrom kolibri.core.content.api import ContentNodeProgressViewset\nfrom kolibri.core.content.api import ContentNodeViewset\nfrom kolibri.core.content.api import UserContentNodeViewset\nfrom kolibri.core.content.models import ContentNode\nfrom kolibri.core.exams.models import Exam\nfrom kolibri.core.lessons.models import Lesson\nfrom kolibri.core.logger.models import AttemptLog\nfrom kolibri.core.logger.models import MasteryLog\n\n\ncontentnode_progress_viewset = ContentNodeProgressViewset()\ncontentnode_viewset = ContentNodeViewset()\nuser_contentnode_viewset = UserContentNodeViewset()\n\n\nclass LearnStateView(APIView):\n def get(self, request, format=None):\n \"\"\"\n Returns some configuration variables applicable to users navigating learn.\n - in_classes: Whether the user is in any classes\n - can_download_externally: Whether the user can download content externally from Kolibri\n \"\"\"\n if request.user.is_anonymous:\n default_facility = Facility.get_default_facility()\n can_download_externally = (\n default_facility.dataset.show_download_button_in_learn\n if default_facility\n else True\n )\n return Response(\n {\n \"in_classes\": False,\n \"can_download_externally\": can_download_externally,\n }\n )\n return Response(\n {\n \"in_classes\": request.user.memberships.exists(),\n \"can_download_externally\": request.user.dataset.show_download_button_in_learn,\n }\n )\n\n\ndef _consolidate_lessons_data(request, lessons):\n lesson_contentnode_ids = set()\n for lesson in lessons:\n lesson_contentnode_ids |= {\n resource[\"contentnode_id\"] for resource in lesson[\"resources\"]\n }\n\n contentnode_progress = (\n contentnode_progress_viewset.serialize_list(\n request, {\"ids\": lesson_contentnode_ids}\n )\n if lesson_contentnode_ids\n else []\n )\n\n contentnodes = (\n contentnode_viewset.serialize_list(request, {\"ids\": lesson_contentnode_ids})\n if lesson_contentnode_ids\n else []\n )\n\n progress_map = {l[\"content_id\"]: l[\"progress\"] for l in contentnode_progress}\n\n contentnode_map = {c[\"id\"]: c for c in contentnodes}\n\n for lesson in lessons:\n lesson[\"progress\"] = {\n \"resource_progress\": sum(\n (\n progress_map[resource[\"content_id\"]]\n for resource in lesson[\"resources\"]\n if resource[\"content_id\"] in progress_map\n )\n ),\n \"total_resources\": len(lesson[\"resources\"]),\n }\n missing_resource = False\n for resource in lesson[\"resources\"]:\n resource[\"progress\"] = progress_map.get(resource[\"content_id\"], 0)\n resource[\"contentnode\"] = contentnode_map.get(\n resource[\"contentnode_id\"], None\n )\n missing_resource = missing_resource or not resource[\"contentnode\"]\n lesson[\"missing_resource\"] = missing_resource\n\n\nclass LearnerClassroomViewset(ReadOnlyValuesViewset):\n \"\"\"\n Returns all Classrooms for which the requesting User is a member,\n along with all associated assignments.\n \"\"\"\n\n permission_classes = (IsAuthenticated,)\n\n values = (\"id\", \"name\")\n\n def get_queryset(self):\n if self.request.user.is_anonymous:\n return Classroom.objects.none()\n return Classroom.objects.filter(membership__user=self.request.user)\n\n def consolidate(self, items, queryset):\n if not items:\n return items\n lessons = (\n Lesson.objects.filter(\n lesson_assignments__collection__membership__user=self.request.user,\n is_active=True,\n collection__in=(c[\"id\"] for c in items),\n )\n .distinct()\n .values(\n \"description\", \"id\", \"is_active\", \"title\", \"resources\", \"collection\"\n )\n )\n _consolidate_lessons_data(self.request, lessons)\n\n user_masterylog_content_ids = MasteryLog.objects.filter(\n user=self.request.user\n ).values(\"summarylog__content_id\")\n\n exams = (\n Exam.objects.filter(\n assignments__collection__membership__user=self.request.user,\n collection__in=(c[\"id\"] for c in items),\n )\n .filter(Q(active=True) | Q(id__in=user_masterylog_content_ids))\n .annotate(\n closed=Subquery(\n MasteryLog.objects.filter(\n summarylog__content_id=OuterRef(\"id\"), user=self.request.user\n ).values(\"complete\")[:1]\n ),\n score=Subquery(\n AttemptLog.objects.filter(\n sessionlog__content_id=OuterRef(\"id\"), user=self.request.user\n )\n .order_by()\n .values_list(\"item\")\n .distinct()\n .values(\"masterylog\")\n .annotate(total_correct=Sum(\"correct\"))\n .values(\"total_correct\"),\n output_field=IntegerField(),\n ),\n answer_count=Subquery(\n AttemptLog.objects.filter(\n sessionlog__content_id=OuterRef(\"id\"), user=self.request.user\n )\n .order_by()\n .values_list(\"item\")\n .distinct()\n .values(\"masterylog\")\n .annotate(total_complete=Count(\"id\"))\n .values(\"total_complete\"),\n output_field=IntegerField(),\n ),\n )\n .distinct()\n .values(\n \"collection\",\n \"active\",\n \"archive\",\n \"id\",\n \"question_count\",\n \"title\",\n \"closed\",\n \"answer_count\",\n \"score\",\n \"question_sources\",\n )\n )\n exam_node_ids = set()\n for exam in exams:\n exam_node_ids |= {\n question[\"exercise_id\"] for question in exam.get(\"question_sources\")\n }\n\n available_exam_ids = set(\n ContentNode.objects.filter_by_uuids(exam_node_ids).values_list(\n \"id\", flat=True\n )\n )\n\n for exam in exams:\n closed = exam.pop(\"closed\")\n score = exam.pop(\"score\")\n answer_count = exam.pop(\"answer_count\")\n if closed is not None:\n exam[\"progress\"] = {\n \"closed\": closed,\n \"score\": score,\n \"answer_count\": answer_count,\n \"started\": True,\n }\n else:\n exam[\"progress\"] = {\n \"score\": None,\n \"answer_count\": None,\n \"closed\": None,\n \"started\": False,\n }\n exam[\"missing_resource\"] = any(\n question[\"exercise_id\"] not in available_exam_ids\n for question in exam.get(\"question_sources\")\n )\n out_items = []\n for item in items:\n item[\"assignments\"] = {\n \"exams\": [exam for exam in exams if exam[\"collection\"] == item[\"id\"]],\n \"lessons\": [\n lesson for lesson in lessons if lesson[\"collection\"] == item[\"id\"]\n ],\n }\n out_items.append(item)\n return out_items\n\n\nlearner_classroom_viewset = LearnerClassroomViewset()\n\n\ndef _resumable_resources(classrooms):\n for classroom in classrooms:\n for lesson in classroom[\"assignments\"][\"lessons\"]:\n for resource in lesson[\"resources\"]:\n yield 0 < resource[\"progress\"] < 1\n\n\nclass LearnHomePageHydrationView(APIView):\n def get(self, request, format=None):\n classrooms = []\n resumable_resources = []\n resumable_resources_progress = []\n if not request.user.is_anonymous:\n classrooms = learner_classroom_viewset.serialize_list(request)\n if not classrooms or not any(_resumable_resources(classrooms)):\n resumable_resources = user_contentnode_viewset.serialize_list(\n request,\n {\"resume\": True, \"max_results\": 12, \"ordering\": \"-last_interacted\"},\n )\n resumable_resources_progress = (\n contentnode_progress_viewset.serialize_list(\n request,\n {\n \"resume\": True,\n \"max_results\": 12,\n \"ordering\": \"-last_interacted\",\n },\n )\n )\n\n return Response(\n {\n \"classrooms\": classrooms,\n \"resumable_resources\": resumable_resources,\n \"resumable_resources_progress\": resumable_resources_progress,\n }\n )\n\n\ndef _map_lesson_classroom(item):\n return {\n \"id\": item.pop(\"collection__id\"),\n \"name\": item.pop(\"collection__name\"),\n \"parent\": item.pop(\"collection__parent_id\"),\n }\n\n\nclass LearnerLessonViewset(ReadOnlyValuesViewset):\n \"\"\"\n Special Viewset for Learners to view Lessons to which they are assigned.\n The core Lesson Viewset is locked down to Admin users only.\n \"\"\"\n\n permission_classes = (IsAuthenticated,)\n\n values = (\n \"id\",\n \"title\",\n \"description\",\n \"resources\",\n \"is_active\",\n \"collection\",\n \"collection__id\",\n \"collection__name\",\n \"collection__parent_id\",\n )\n\n field_map = {\"classroom\": _map_lesson_classroom}\n\n def get_queryset(self):\n if self.request.user.is_anonymous:\n return Lesson.objects.none()\n return Lesson.objects.filter(\n lesson_assignments__collection__membership__user=self.request.user,\n is_active=True,\n )\n\n def consolidate(self, items, queryset):\n if not items:\n return items\n\n _consolidate_lessons_data(self.request, items)\n\n return items\n", "path": "kolibri/plugins/learn/viewsets.py"}], "after_files": [{"content": "from django.db.models import Count\nfrom django.db.models import OuterRef\nfrom django.db.models import Q\nfrom django.db.models import Subquery\nfrom django.db.models import Sum\nfrom django.db.models.fields import IntegerField\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom kolibri.core.api import ReadOnlyValuesViewset\nfrom kolibri.core.auth.models import Classroom\nfrom kolibri.core.auth.models import Facility\nfrom kolibri.core.content.api import ContentNodeProgressViewset\nfrom kolibri.core.content.api import ContentNodeViewset\nfrom kolibri.core.content.api import UserContentNodeViewset\nfrom kolibri.core.content.models import ContentNode\nfrom kolibri.core.exams.models import Exam\nfrom kolibri.core.exams.models import exam_assignment_lookup\nfrom kolibri.core.lessons.models import Lesson\nfrom kolibri.core.logger.models import AttemptLog\nfrom kolibri.core.logger.models import MasteryLog\n\n\ncontentnode_progress_viewset = ContentNodeProgressViewset()\ncontentnode_viewset = ContentNodeViewset()\nuser_contentnode_viewset = UserContentNodeViewset()\n\n\nclass LearnStateView(APIView):\n def get(self, request, format=None):\n \"\"\"\n Returns some configuration variables applicable to users navigating learn.\n - in_classes: Whether the user is in any classes\n - can_download_externally: Whether the user can download content externally from Kolibri\n \"\"\"\n if request.user.is_anonymous:\n default_facility = Facility.get_default_facility()\n can_download_externally = (\n default_facility.dataset.show_download_button_in_learn\n if default_facility\n else True\n )\n return Response(\n {\n \"in_classes\": False,\n \"can_download_externally\": can_download_externally,\n }\n )\n return Response(\n {\n \"in_classes\": request.user.memberships.exists(),\n \"can_download_externally\": request.user.dataset.show_download_button_in_learn,\n }\n )\n\n\ndef _consolidate_lessons_data(request, lessons):\n lesson_contentnode_ids = set()\n for lesson in lessons:\n lesson_contentnode_ids |= {\n resource[\"contentnode_id\"] for resource in lesson[\"resources\"]\n }\n\n contentnode_progress = (\n contentnode_progress_viewset.serialize_list(\n request, {\"ids\": lesson_contentnode_ids}\n )\n if lesson_contentnode_ids\n else []\n )\n\n contentnodes = (\n contentnode_viewset.serialize_list(request, {\"ids\": lesson_contentnode_ids})\n if lesson_contentnode_ids\n else []\n )\n\n progress_map = {l[\"content_id\"]: l[\"progress\"] for l in contentnode_progress}\n\n contentnode_map = {c[\"id\"]: c for c in contentnodes}\n\n for lesson in lessons:\n lesson[\"progress\"] = {\n \"resource_progress\": sum(\n (\n progress_map[resource[\"content_id\"]]\n for resource in lesson[\"resources\"]\n if resource[\"content_id\"] in progress_map\n )\n ),\n \"total_resources\": len(lesson[\"resources\"]),\n }\n missing_resource = False\n for resource in lesson[\"resources\"]:\n resource[\"progress\"] = progress_map.get(resource[\"content_id\"], 0)\n resource[\"contentnode\"] = contentnode_map.get(\n resource[\"contentnode_id\"], None\n )\n missing_resource = missing_resource or not resource[\"contentnode\"]\n lesson[\"missing_resource\"] = missing_resource\n\n\nclass LearnerClassroomViewset(ReadOnlyValuesViewset):\n \"\"\"\n Returns all Classrooms for which the requesting User is a member,\n along with all associated assignments.\n \"\"\"\n\n permission_classes = (IsAuthenticated,)\n\n values = (\"id\", \"name\")\n\n def get_queryset(self):\n if self.request.user.is_anonymous:\n return Classroom.objects.none()\n return Classroom.objects.filter(membership__user=self.request.user)\n\n def consolidate(self, items, queryset):\n if not items:\n return items\n lessons = (\n Lesson.objects.filter(\n lesson_assignments__collection__membership__user=self.request.user,\n is_active=True,\n collection__in=(c[\"id\"] for c in items),\n )\n .distinct()\n .values(\n \"description\", \"id\", \"is_active\", \"title\", \"resources\", \"collection\"\n )\n )\n _consolidate_lessons_data(self.request, lessons)\n\n user_masterylog_content_ids = MasteryLog.objects.filter(\n user=self.request.user\n ).values(\"summarylog__content_id\")\n\n exams = (\n Exam.objects.filter(\n assignments__collection__membership__user=self.request.user,\n collection__in=(c[\"id\"] for c in items),\n )\n .filter(Q(active=True) | Q(id__in=user_masterylog_content_ids))\n .annotate(\n closed=Subquery(\n MasteryLog.objects.filter(\n summarylog__content_id=OuterRef(\"id\"), user=self.request.user\n ).values(\"complete\")[:1]\n ),\n score=Subquery(\n AttemptLog.objects.filter(\n sessionlog__content_id=OuterRef(\"id\"), user=self.request.user\n )\n .order_by()\n .values_list(\"item\")\n .distinct()\n .values(\"masterylog\")\n .annotate(total_correct=Sum(\"correct\"))\n .values(\"total_correct\"),\n output_field=IntegerField(),\n ),\n answer_count=Subquery(\n AttemptLog.objects.filter(\n sessionlog__content_id=OuterRef(\"id\"), user=self.request.user\n )\n .order_by()\n .values_list(\"item\")\n .distinct()\n .values(\"masterylog\")\n .annotate(total_complete=Count(\"id\"))\n .values(\"total_complete\"),\n output_field=IntegerField(),\n ),\n )\n .distinct()\n .values(\n \"collection\",\n \"active\",\n \"archive\",\n \"id\",\n \"question_count\",\n \"title\",\n \"closed\",\n \"answer_count\",\n \"data_model_version\",\n \"score\",\n \"question_sources\",\n )\n )\n exam_node_ids = set()\n\n for exam in exams:\n exam_node_ids |= {\n exercise_id\n for exercise_id, _ in exam_assignment_lookup(\n exam.get(\"question_sources\", [])\n )\n }\n\n available_exam_ids = set(\n ContentNode.objects.filter_by_uuids(exam_node_ids).values_list(\n \"id\", flat=True\n )\n )\n\n for exam in exams:\n closed = exam.pop(\"closed\")\n score = exam.pop(\"score\")\n answer_count = exam.pop(\"answer_count\")\n if closed is not None:\n exam[\"progress\"] = {\n \"closed\": closed,\n \"score\": score,\n \"answer_count\": answer_count,\n \"started\": True,\n }\n else:\n exam[\"progress\"] = {\n \"score\": None,\n \"answer_count\": None,\n \"closed\": None,\n \"started\": False,\n }\n exam[\"missing_resource\"] = any(\n exercise_id not in available_exam_ids\n for exercise_id, _ in exam_assignment_lookup(\n exam.get(\"question_sources\", [])\n )\n )\n out_items = []\n for item in items:\n item[\"assignments\"] = {\n \"exams\": [exam for exam in exams if exam[\"collection\"] == item[\"id\"]],\n \"lessons\": [\n lesson for lesson in lessons if lesson[\"collection\"] == item[\"id\"]\n ],\n }\n out_items.append(item)\n return out_items\n\n\nlearner_classroom_viewset = LearnerClassroomViewset()\n\n\ndef _resumable_resources(classrooms):\n for classroom in classrooms:\n for lesson in classroom[\"assignments\"][\"lessons\"]:\n for resource in lesson[\"resources\"]:\n yield 0 < resource[\"progress\"] < 1\n\n\nclass LearnHomePageHydrationView(APIView):\n def get(self, request, format=None):\n classrooms = []\n resumable_resources = []\n resumable_resources_progress = []\n if not request.user.is_anonymous:\n classrooms = learner_classroom_viewset.serialize_list(request)\n if not classrooms or not any(_resumable_resources(classrooms)):\n resumable_resources = user_contentnode_viewset.serialize_list(\n request,\n {\"resume\": True, \"max_results\": 12, \"ordering\": \"-last_interacted\"},\n )\n resumable_resources_progress = (\n contentnode_progress_viewset.serialize_list(\n request,\n {\n \"resume\": True,\n \"max_results\": 12,\n \"ordering\": \"-last_interacted\",\n },\n )\n )\n\n return Response(\n {\n \"classrooms\": classrooms,\n \"resumable_resources\": resumable_resources,\n \"resumable_resources_progress\": resumable_resources_progress,\n }\n )\n\n\ndef _map_lesson_classroom(item):\n return {\n \"id\": item.pop(\"collection__id\"),\n \"name\": item.pop(\"collection__name\"),\n \"parent\": item.pop(\"collection__parent_id\"),\n }\n\n\nclass LearnerLessonViewset(ReadOnlyValuesViewset):\n \"\"\"\n Special Viewset for Learners to view Lessons to which they are assigned.\n The core Lesson Viewset is locked down to Admin users only.\n \"\"\"\n\n permission_classes = (IsAuthenticated,)\n\n values = (\n \"id\",\n \"title\",\n \"description\",\n \"resources\",\n \"is_active\",\n \"collection\",\n \"collection__id\",\n \"collection__name\",\n \"collection__parent_id\",\n )\n\n field_map = {\"classroom\": _map_lesson_classroom}\n\n def get_queryset(self):\n if self.request.user.is_anonymous:\n return Lesson.objects.none()\n return Lesson.objects.filter(\n lesson_assignments__collection__membership__user=self.request.user,\n is_active=True,\n )\n\n def consolidate(self, items, queryset):\n if not items:\n return items\n\n _consolidate_lessons_data(self.request, items)\n\n return items\n", "path": "kolibri/plugins/learn/viewsets.py"}]}
3,468
363
gh_patches_debug_17251
rasdani/github-patches
git_diff
strawberry-graphql__strawberry-183
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ASGI app broken with asyncio Our ASGI app is currently broken with async io stuff. The implementation I made needs a bit of love :) We might have a look at how Ariadne is doing this :) Here's a test that fails: ```python import typing import pytest import strawberry from starlette.testclient import TestClient from strawberry.asgi import GraphQL @pytest.fixture def async_schema(): @strawberry.type class Query: @strawberry.field async def hello(self, info, name: typing.Optional[str] = None) -> str: return f"Hello {name or 'world'}" return strawberry.Schema(Query) @pytest.fixture def test_client(async_schema): app = GraphQL(async_schema) return TestClient(app) def test_simple_query(schema, test_client): response = test_client.post("/", json={"query": "{ hello }"}) assert response.json() == {"data": {"hello": "Hello world"}} ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `strawberry/graphql.py` Content: ``` 1 import typing 2 3 from graphql import ( 4 ExecutionResult, 5 GraphQLError, 6 GraphQLSchema, 7 execute as graphql_excute, 8 parse, 9 ) 10 from graphql.subscription import subscribe as graphql_subscribe 11 from graphql.type import validate_schema 12 from graphql.validation import validate 13 14 from .middleware import DirectivesMiddleware 15 16 17 async def execute( 18 schema: GraphQLSchema, 19 query: str, 20 context_value: typing.Any = None, 21 variable_values: typing.Dict[str, typing.Any] = None, 22 operation_name: str = None, 23 ): 24 schema_validation_errors = validate_schema(schema) 25 if schema_validation_errors: 26 return ExecutionResult(data=None, errors=schema_validation_errors) 27 28 try: 29 document = parse(query) 30 except GraphQLError as error: 31 return ExecutionResult(data=None, errors=[error]) 32 except Exception as error: 33 error = GraphQLError(str(error), original_error=error) 34 return ExecutionResult(data=None, errors=[error]) 35 36 validation_errors = validate(schema, document) 37 38 if validation_errors: 39 return ExecutionResult(data=None, errors=validation_errors) 40 41 return graphql_excute( 42 schema, 43 parse(query), 44 middleware=[DirectivesMiddleware()], 45 variable_values=variable_values, 46 operation_name=operation_name, 47 context_value=context_value, 48 ) 49 50 51 async def subscribe( 52 schema: GraphQLSchema, 53 query: str, 54 root_value: typing.Any = None, 55 context_value: typing.Any = None, 56 variable_values: typing.Dict[str, typing.Any] = None, 57 operation_name: str = None, 58 ) -> typing.Union[typing.AsyncIterator[ExecutionResult], ExecutionResult]: 59 document = parse(query) 60 61 return await graphql_subscribe( 62 schema=schema, 63 document=document, 64 root_value=root_value, 65 context_value=context_value, 66 variable_values=variable_values, 67 operation_name=operation_name, 68 ) 69 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/strawberry/graphql.py b/strawberry/graphql.py --- a/strawberry/graphql.py +++ b/strawberry/graphql.py @@ -1,4 +1,5 @@ import typing +from inspect import isawaitable from graphql import ( ExecutionResult, @@ -38,7 +39,7 @@ if validation_errors: return ExecutionResult(data=None, errors=validation_errors) - return graphql_excute( + result = graphql_excute( schema, parse(query), middleware=[DirectivesMiddleware()], @@ -46,6 +47,9 @@ operation_name=operation_name, context_value=context_value, ) + if isawaitable(result): + result = await typing.cast(typing.Awaitable[ExecutionResult], result) + return result async def subscribe(
{"golden_diff": "diff --git a/strawberry/graphql.py b/strawberry/graphql.py\n--- a/strawberry/graphql.py\n+++ b/strawberry/graphql.py\n@@ -1,4 +1,5 @@\n import typing\n+from inspect import isawaitable\n \n from graphql import (\n ExecutionResult,\n@@ -38,7 +39,7 @@\n if validation_errors:\n return ExecutionResult(data=None, errors=validation_errors)\n \n- return graphql_excute(\n+ result = graphql_excute(\n schema,\n parse(query),\n middleware=[DirectivesMiddleware()],\n@@ -46,6 +47,9 @@\n operation_name=operation_name,\n context_value=context_value,\n )\n+ if isawaitable(result):\n+ result = await typing.cast(typing.Awaitable[ExecutionResult], result)\n+ return result\n \n \n async def subscribe(\n", "issue": "ASGI app broken with asyncio\nOur ASGI app is currently broken with async io stuff. The implementation I made needs a bit of love :) We might have a look at how Ariadne is doing this :)\r\n\r\nHere's a test that fails:\r\n\r\n```python\r\nimport typing\r\n\r\nimport pytest\r\n\r\nimport strawberry\r\nfrom starlette.testclient import TestClient\r\nfrom strawberry.asgi import GraphQL\r\n\r\n\r\[email protected]\r\ndef async_schema():\r\n @strawberry.type\r\n class Query:\r\n @strawberry.field\r\n async def hello(self, info, name: typing.Optional[str] = None) -> str:\r\n return f\"Hello {name or 'world'}\"\r\n\r\n return strawberry.Schema(Query)\r\n\r\n\r\[email protected]\r\ndef test_client(async_schema):\r\n app = GraphQL(async_schema)\r\n\r\n return TestClient(app)\r\n\r\n\r\ndef test_simple_query(schema, test_client):\r\n response = test_client.post(\"/\", json={\"query\": \"{ hello }\"})\r\n\r\n assert response.json() == {\"data\": {\"hello\": \"Hello world\"}}\r\n```\n", "before_files": [{"content": "import typing\n\nfrom graphql import (\n ExecutionResult,\n GraphQLError,\n GraphQLSchema,\n execute as graphql_excute,\n parse,\n)\nfrom graphql.subscription import subscribe as graphql_subscribe\nfrom graphql.type import validate_schema\nfrom graphql.validation import validate\n\nfrom .middleware import DirectivesMiddleware\n\n\nasync def execute(\n schema: GraphQLSchema,\n query: str,\n context_value: typing.Any = None,\n variable_values: typing.Dict[str, typing.Any] = None,\n operation_name: str = None,\n):\n schema_validation_errors = validate_schema(schema)\n if schema_validation_errors:\n return ExecutionResult(data=None, errors=schema_validation_errors)\n\n try:\n document = parse(query)\n except GraphQLError as error:\n return ExecutionResult(data=None, errors=[error])\n except Exception as error:\n error = GraphQLError(str(error), original_error=error)\n return ExecutionResult(data=None, errors=[error])\n\n validation_errors = validate(schema, document)\n\n if validation_errors:\n return ExecutionResult(data=None, errors=validation_errors)\n\n return graphql_excute(\n schema,\n parse(query),\n middleware=[DirectivesMiddleware()],\n variable_values=variable_values,\n operation_name=operation_name,\n context_value=context_value,\n )\n\n\nasync def subscribe(\n schema: GraphQLSchema,\n query: str,\n root_value: typing.Any = None,\n context_value: typing.Any = None,\n variable_values: typing.Dict[str, typing.Any] = None,\n operation_name: str = None,\n) -> typing.Union[typing.AsyncIterator[ExecutionResult], ExecutionResult]:\n document = parse(query)\n\n return await graphql_subscribe(\n schema=schema,\n document=document,\n root_value=root_value,\n context_value=context_value,\n variable_values=variable_values,\n operation_name=operation_name,\n )\n", "path": "strawberry/graphql.py"}], "after_files": [{"content": "import typing\nfrom inspect import isawaitable\n\nfrom graphql import (\n ExecutionResult,\n GraphQLError,\n GraphQLSchema,\n execute as graphql_excute,\n parse,\n)\nfrom graphql.subscription import subscribe as graphql_subscribe\nfrom graphql.type import validate_schema\nfrom graphql.validation import validate\n\nfrom .middleware import DirectivesMiddleware\n\n\nasync def execute(\n schema: GraphQLSchema,\n query: str,\n context_value: typing.Any = None,\n variable_values: typing.Dict[str, typing.Any] = None,\n operation_name: str = None,\n):\n schema_validation_errors = validate_schema(schema)\n if schema_validation_errors:\n return ExecutionResult(data=None, errors=schema_validation_errors)\n\n try:\n document = parse(query)\n except GraphQLError as error:\n return ExecutionResult(data=None, errors=[error])\n except Exception as error:\n error = GraphQLError(str(error), original_error=error)\n return ExecutionResult(data=None, errors=[error])\n\n validation_errors = validate(schema, document)\n\n if validation_errors:\n return ExecutionResult(data=None, errors=validation_errors)\n\n result = graphql_excute(\n schema,\n parse(query),\n middleware=[DirectivesMiddleware()],\n variable_values=variable_values,\n operation_name=operation_name,\n context_value=context_value,\n )\n if isawaitable(result):\n result = await typing.cast(typing.Awaitable[ExecutionResult], result)\n return result\n\n\nasync def subscribe(\n schema: GraphQLSchema,\n query: str,\n root_value: typing.Any = None,\n context_value: typing.Any = None,\n variable_values: typing.Dict[str, typing.Any] = None,\n operation_name: str = None,\n) -> typing.Union[typing.AsyncIterator[ExecutionResult], ExecutionResult]:\n document = parse(query)\n\n return await graphql_subscribe(\n schema=schema,\n document=document,\n root_value=root_value,\n context_value=context_value,\n variable_values=variable_values,\n operation_name=operation_name,\n )\n", "path": "strawberry/graphql.py"}]}
990
187
gh_patches_debug_31569
rasdani/github-patches
git_diff
joke2k__faker-74
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Honor Environment LANG I am currently using a wrapper for fake-factory to be able to choose the output but it would be great it would become part of fake-factory core. This is the script i have in my path: https://gist.github.com/makefu/9101269 usage: $ LANG=de_DE.utf-8 faker address Davide-Kaul-Weg 175 94892 Königs Wusterhausen --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `faker/__main__.py` Content: ``` 1 from __future__ import unicode_literals 2 from __future__ import print_function 3 import sys 4 5 if sys.version < '3': 6 text_type = unicode 7 binary_type = str 8 else: 9 text_type = str 10 binary_type = bytes 11 12 13 def print_provider(doc, provider, formatters, excludes=None): 14 if excludes is None: 15 excludes = [] 16 17 print() 18 print("### faker.providers.{0}".format(doc.get_provider_name(provider))) 19 print() 20 21 for signature, example in formatters.items(): 22 if signature in excludes: 23 continue 24 try: 25 lines = text_type(example).expandtabs().splitlines() 26 except UnicodeEncodeError: 27 raise Exception('error on "{0}" with value "{1}"'.format(signature, example)) 28 margin = max(30, doc.max_name_len+1) 29 remains = 150 - margin 30 separator = '#' 31 for line in lines: 32 for i in range(0, (len(line) // remains) + 1): 33 print("\t{fake:<{margin}}{separator} {example}".format( 34 fake=signature, 35 separator=separator, 36 example=line[i*remains:(i+1)*remains], 37 margin=margin 38 )) 39 signature = separator = ' ' 40 41 42 def main(provider_or_field=None, *args): 43 from faker import Faker, Factory, documentor, DEFAULT_LOCALE, AVAILABLE_LOCALES 44 fake = Faker(locale=DEFAULT_LOCALE) 45 46 from faker.providers import BaseProvider 47 base_provider_formatters = [f for f in dir(BaseProvider)] 48 49 if provider_or_field: 50 if '.' in provider_or_field: 51 parts = provider_or_field.split('.') 52 locale = parts[-2] if parts[-2] in AVAILABLE_LOCALES else DEFAULT_LOCALE 53 fake = Factory.create(locale, providers=[parts[-1]]) 54 doc = documentor.Documentor(fake) 55 doc.already_generated = base_provider_formatters 56 print_provider(doc, fake.get_providers()[0], doc.get_provider_formatters(fake.get_providers()[0])) 57 else: 58 try: 59 print(fake.format(provider_or_field, *args)) 60 except AttributeError: 61 print('No faker found for "{0}({1})"'.format(provider_or_field, args)) 62 63 else: 64 doc = documentor.Documentor(fake) 65 66 formatters = doc.get_formatters(with_args=True, with_defaults=True) 67 68 for provider, fakers in formatters: 69 70 print_provider(doc, provider, fakers) 71 72 for lang in AVAILABLE_LOCALES: 73 if lang == DEFAULT_LOCALE: 74 continue 75 print() 76 print('## LANGUAGE {0}'.format(lang)) 77 fake = Faker(locale=lang) 78 d = documentor.Documentor(fake) 79 80 for p, fs in d.get_formatters(with_args=True, with_defaults=True, locale=lang, 81 excludes=base_provider_formatters): 82 print_provider(d, p, fs) 83 84 85 if __name__ == "__main__": 86 if sys.stdout.encoding is None: 87 print("please set python env PYTHONIOENCODING=UTF-8, example: " 88 "export PYTHONIOENCODING=UTF-8, when write to stdout", file=sys.stderr) 89 exit(1) 90 main(*sys.argv[1:]) ``` Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 import os 4 from setuptools import setup, find_packages 5 6 here = os.path.abspath(os.path.dirname(__file__)) 7 README = open(os.path.join(here, 'README.md')).read() 8 NEWS = open(os.path.join(here, 'NEWS.txt')).read() 9 10 11 version = '0.3.2' 12 13 setup(name='fake-factory', 14 version=version, 15 description="Faker is a Python package that generates fake data for you.", 16 long_description=README + '\n\n' + NEWS, 17 classifiers=[ 18 # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers 19 'Development Status :: 3 - Alpha', 20 'Environment :: Console', 21 'Intended Audience :: Developers', 22 'Programming Language :: Python', 23 'Programming Language :: Python :: 2', 24 'Programming Language :: Python :: 3', 25 'Topic :: Software Development :: Libraries :: Python Modules', 26 'Topic :: Software Development :: Testing', 27 'Topic :: Utilities', 28 'License :: OSI Approved :: MIT License' 29 ], 30 keywords='faker fixtures data test mock generator', 31 author='joke2k', 32 author_email='[email protected]', 33 url='http://github.com/joke2k/faker', 34 license='MIT License', 35 packages=find_packages(exclude=['*.tests']), 36 platforms=["any"], 37 test_suite='faker.tests', 38 ) 39 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/faker/__main__.py b/faker/__main__.py --- a/faker/__main__.py +++ b/faker/__main__.py @@ -1,5 +1,9 @@ from __future__ import unicode_literals from __future__ import print_function + +from faker import Faker, Factory, documentor, AVAILABLE_LOCALES, DEFAULT_LOCALE + +import os import sys if sys.version < '3': @@ -40,7 +44,6 @@ def main(provider_or_field=None, *args): - from faker import Faker, Factory, documentor, DEFAULT_LOCALE, AVAILABLE_LOCALES fake = Faker(locale=DEFAULT_LOCALE) from faker.providers import BaseProvider @@ -82,6 +85,28 @@ print_provider(d, p, fs) +def command(*args): + try: + f = args[0] + except IndexError: + f = '--help' + + if f in ["--help", "-h"]: + main() + sys.exit() + + locale = os.environ['LANG'].split('.')[0] + + if locale not in AVAILABLE_LOCALES: + locale = DEFAULT_LOCALE + fake = Faker(locale=locale) + + try: + print(fake.format(f, *args[1:])) + except AttributeError: + print('No faker found for "{0}"'.format(f)) + + if __name__ == "__main__": if sys.stdout.encoding is None: print("please set python env PYTHONIOENCODING=UTF-8, example: " diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -14,6 +14,7 @@ version=version, description="Faker is a Python package that generates fake data for you.", long_description=README + '\n\n' + NEWS, + scripts=['bin/faker'], classifiers=[ # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers 'Development Status :: 3 - Alpha',
{"golden_diff": "diff --git a/faker/__main__.py b/faker/__main__.py\n--- a/faker/__main__.py\n+++ b/faker/__main__.py\n@@ -1,5 +1,9 @@\n from __future__ import unicode_literals\n from __future__ import print_function\n+\n+from faker import Faker, Factory, documentor, AVAILABLE_LOCALES, DEFAULT_LOCALE\n+\n+import os\n import sys\n \n if sys.version < '3':\n@@ -40,7 +44,6 @@\n \n \n def main(provider_or_field=None, *args):\n- from faker import Faker, Factory, documentor, DEFAULT_LOCALE, AVAILABLE_LOCALES\n fake = Faker(locale=DEFAULT_LOCALE)\n \n from faker.providers import BaseProvider\n@@ -82,6 +85,28 @@\n print_provider(d, p, fs)\n \n \n+def command(*args):\n+ try:\n+ f = args[0]\n+ except IndexError:\n+ f = '--help'\n+\n+ if f in [\"--help\", \"-h\"]:\n+ main()\n+ sys.exit()\n+\n+ locale = os.environ['LANG'].split('.')[0]\n+ \n+ if locale not in AVAILABLE_LOCALES:\n+ locale = DEFAULT_LOCALE\n+ fake = Faker(locale=locale)\n+\n+ try:\n+ print(fake.format(f, *args[1:]))\n+ except AttributeError:\n+ print('No faker found for \"{0}\"'.format(f))\n+\n+\n if __name__ == \"__main__\":\n if sys.stdout.encoding is None:\n print(\"please set python env PYTHONIOENCODING=UTF-8, example: \"\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,6 +14,7 @@\n version=version,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README + '\\n\\n' + NEWS,\n+ scripts=['bin/faker'],\n classifiers=[\n # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 3 - Alpha',\n", "issue": "Honor Environment LANG\nI am currently using a wrapper for fake-factory to be able to choose the output but it would be great it would become part of fake-factory core.\n\nThis is the script i have in my path: https://gist.github.com/makefu/9101269\n\nusage: \n$ LANG=de_DE.utf-8 faker address\n Davide-Kaul-Weg 175\n 94892 K\u00f6nigs Wusterhausen\n\n", "before_files": [{"content": "from __future__ import unicode_literals\nfrom __future__ import print_function\nimport sys\n\nif sys.version < '3':\n text_type = unicode\n binary_type = str\nelse:\n text_type = str\n binary_type = bytes\n\n\ndef print_provider(doc, provider, formatters, excludes=None):\n if excludes is None:\n excludes = []\n\n print()\n print(\"### faker.providers.{0}\".format(doc.get_provider_name(provider)))\n print()\n\n for signature, example in formatters.items():\n if signature in excludes:\n continue\n try:\n lines = text_type(example).expandtabs().splitlines()\n except UnicodeEncodeError:\n raise Exception('error on \"{0}\" with value \"{1}\"'.format(signature, example))\n margin = max(30, doc.max_name_len+1)\n remains = 150 - margin\n separator = '#'\n for line in lines:\n for i in range(0, (len(line) // remains) + 1):\n print(\"\\t{fake:<{margin}}{separator} {example}\".format(\n fake=signature,\n separator=separator,\n example=line[i*remains:(i+1)*remains],\n margin=margin\n ))\n signature = separator = ' '\n\n\ndef main(provider_or_field=None, *args):\n from faker import Faker, Factory, documentor, DEFAULT_LOCALE, AVAILABLE_LOCALES\n fake = Faker(locale=DEFAULT_LOCALE)\n\n from faker.providers import BaseProvider\n base_provider_formatters = [f for f in dir(BaseProvider)]\n\n if provider_or_field:\n if '.' in provider_or_field:\n parts = provider_or_field.split('.')\n locale = parts[-2] if parts[-2] in AVAILABLE_LOCALES else DEFAULT_LOCALE\n fake = Factory.create(locale, providers=[parts[-1]])\n doc = documentor.Documentor(fake)\n doc.already_generated = base_provider_formatters\n print_provider(doc, fake.get_providers()[0], doc.get_provider_formatters(fake.get_providers()[0]))\n else:\n try:\n print(fake.format(provider_or_field, *args))\n except AttributeError:\n print('No faker found for \"{0}({1})\"'.format(provider_or_field, args))\n\n else:\n doc = documentor.Documentor(fake)\n\n formatters = doc.get_formatters(with_args=True, with_defaults=True)\n\n for provider, fakers in formatters:\n\n print_provider(doc, provider, fakers)\n\n for lang in AVAILABLE_LOCALES:\n if lang == DEFAULT_LOCALE:\n continue\n print()\n print('## LANGUAGE {0}'.format(lang))\n fake = Faker(locale=lang)\n d = documentor.Documentor(fake)\n\n for p, fs in d.get_formatters(with_args=True, with_defaults=True, locale=lang,\n excludes=base_provider_formatters):\n print_provider(d, p, fs)\n\n\nif __name__ == \"__main__\":\n if sys.stdout.encoding is None:\n print(\"please set python env PYTHONIOENCODING=UTF-8, example: \"\n \"export PYTHONIOENCODING=UTF-8, when write to stdout\", file=sys.stderr)\n exit(1)\n main(*sys.argv[1:])", "path": "faker/__main__.py"}, {"content": "#!/usr/bin/env python\n\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(here, 'README.md')).read()\nNEWS = open(os.path.join(here, 'NEWS.txt')).read()\n\n\nversion = '0.3.2'\n\nsetup(name='fake-factory',\n version=version,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README + '\\n\\n' + NEWS,\n classifiers=[\n # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Testing',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License'\n ],\n keywords='faker fixtures data test mock generator',\n author='joke2k',\n author_email='[email protected]',\n url='http://github.com/joke2k/faker',\n license='MIT License',\n packages=find_packages(exclude=['*.tests']),\n platforms=[\"any\"],\n test_suite='faker.tests',\n )\n", "path": "setup.py"}], "after_files": [{"content": "from __future__ import unicode_literals\nfrom __future__ import print_function\n\nfrom faker import Faker, Factory, documentor, AVAILABLE_LOCALES, DEFAULT_LOCALE\n\nimport os\nimport sys\n\nif sys.version < '3':\n text_type = unicode\n binary_type = str\nelse:\n text_type = str\n binary_type = bytes\n\n\ndef print_provider(doc, provider, formatters, excludes=None):\n if excludes is None:\n excludes = []\n\n print()\n print(\"### faker.providers.{0}\".format(doc.get_provider_name(provider)))\n print()\n\n for signature, example in formatters.items():\n if signature in excludes:\n continue\n try:\n lines = text_type(example).expandtabs().splitlines()\n except UnicodeEncodeError:\n raise Exception('error on \"{0}\" with value \"{1}\"'.format(signature, example))\n margin = max(30, doc.max_name_len+1)\n remains = 150 - margin\n separator = '#'\n for line in lines:\n for i in range(0, (len(line) // remains) + 1):\n print(\"\\t{fake:<{margin}}{separator} {example}\".format(\n fake=signature,\n separator=separator,\n example=line[i*remains:(i+1)*remains],\n margin=margin\n ))\n signature = separator = ' '\n\n\ndef main(provider_or_field=None, *args):\n fake = Faker(locale=DEFAULT_LOCALE)\n\n from faker.providers import BaseProvider\n base_provider_formatters = [f for f in dir(BaseProvider)]\n\n if provider_or_field:\n if '.' in provider_or_field:\n parts = provider_or_field.split('.')\n locale = parts[-2] if parts[-2] in AVAILABLE_LOCALES else DEFAULT_LOCALE\n fake = Factory.create(locale, providers=[parts[-1]])\n doc = documentor.Documentor(fake)\n doc.already_generated = base_provider_formatters\n print_provider(doc, fake.get_providers()[0], doc.get_provider_formatters(fake.get_providers()[0]))\n else:\n try:\n print(fake.format(provider_or_field, *args))\n except AttributeError:\n print('No faker found for \"{0}({1})\"'.format(provider_or_field, args))\n\n else:\n doc = documentor.Documentor(fake)\n\n formatters = doc.get_formatters(with_args=True, with_defaults=True)\n\n for provider, fakers in formatters:\n\n print_provider(doc, provider, fakers)\n\n for lang in AVAILABLE_LOCALES:\n if lang == DEFAULT_LOCALE:\n continue\n print()\n print('## LANGUAGE {0}'.format(lang))\n fake = Faker(locale=lang)\n d = documentor.Documentor(fake)\n\n for p, fs in d.get_formatters(with_args=True, with_defaults=True, locale=lang,\n excludes=base_provider_formatters):\n print_provider(d, p, fs)\n\n\ndef command(*args):\n try:\n f = args[0]\n except IndexError:\n f = '--help'\n\n if f in [\"--help\", \"-h\"]:\n main()\n sys.exit()\n\n locale = os.environ['LANG'].split('.')[0]\n \n if locale not in AVAILABLE_LOCALES:\n locale = DEFAULT_LOCALE\n fake = Faker(locale=locale)\n\n try:\n print(fake.format(f, *args[1:]))\n except AttributeError:\n print('No faker found for \"{0}\"'.format(f))\n\n\nif __name__ == \"__main__\":\n if sys.stdout.encoding is None:\n print(\"please set python env PYTHONIOENCODING=UTF-8, example: \"\n \"export PYTHONIOENCODING=UTF-8, when write to stdout\", file=sys.stderr)\n exit(1)\n main(*sys.argv[1:])", "path": "faker/__main__.py"}, {"content": "#!/usr/bin/env python\n\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(here, 'README.md')).read()\nNEWS = open(os.path.join(here, 'NEWS.txt')).read()\n\n\nversion = '0.3.2'\n\nsetup(name='fake-factory',\n version=version,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README + '\\n\\n' + NEWS,\n scripts=['bin/faker'],\n classifiers=[\n # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Testing',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License'\n ],\n keywords='faker fixtures data test mock generator',\n author='joke2k',\n author_email='[email protected]',\n url='http://github.com/joke2k/faker',\n license='MIT License',\n packages=find_packages(exclude=['*.tests']),\n platforms=[\"any\"],\n test_suite='faker.tests',\n )\n", "path": "setup.py"}]}
1,617
464
gh_patches_debug_35374
rasdani/github-patches
git_diff
googleapis__google-cloud-python-3156
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Language: support mention type in Entity.mentions. [Currently](https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/language/google/cloud/language/entity.py#L79) the mentions property of an entity is only a list of strings whereas it should be a list of objects containing the mention text and mention type. Furthermore, this change should add mention_type information to the mention documentation. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `language/google/cloud/language/entity.py` Content: ``` 1 # Copyright 2016-2017 Google Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Definition for Google Cloud Natural Language API entities. 16 17 An entity is used to describe a proper name extracted from text. 18 """ 19 20 21 class EntityType(object): 22 """List of possible entity types.""" 23 24 UNKNOWN = 'UNKNOWN' 25 """Unknown entity type.""" 26 27 PERSON = 'PERSON' 28 """Person entity type.""" 29 30 LOCATION = 'LOCATION' 31 """Location entity type.""" 32 33 ORGANIZATION = 'ORGANIZATION' 34 """Organization entity type.""" 35 36 EVENT = 'EVENT' 37 """Event entity type.""" 38 39 WORK_OF_ART = 'WORK_OF_ART' 40 """Work of art entity type.""" 41 42 CONSUMER_GOOD = 'CONSUMER_GOOD' 43 """Consumer good entity type.""" 44 45 OTHER = 'OTHER' 46 """Other entity type (i.e. known but not classified).""" 47 48 49 class Entity(object): 50 """A Google Cloud Natural Language API entity. 51 52 Represents a phrase in text that is a known entity, such as a person, 53 an organization, or location. The API associates information, such as 54 salience and mentions, with entities. 55 56 .. _Entity message: https://cloud.google.com/natural-language/\ 57 reference/rest/v1/Entity 58 .. _EntityType enum: https://cloud.google.com/natural-language/\ 59 reference/rest/v1/Entity#Type 60 61 See `Entity message`_. 62 63 :type name: str 64 :param name: The name / phrase identified as the entity. 65 66 :type entity_type: str 67 :param entity_type: The type of the entity. See `EntityType enum`_. 68 69 :type metadata: dict 70 :param metadata: The metadata associated with the entity. 71 Wikipedia URLs and Knowledge Graph MIDs are 72 provided, if available. The associated keys are 73 "wikipedia_url" and "mid", respectively. 74 75 :type salience: float 76 :param salience: The prominence of the entity / phrase within the text 77 containing it. 78 79 :type mentions: list 80 :param mentions: List of strings that mention the entity. 81 """ 82 83 def __init__(self, name, entity_type, metadata, salience, mentions): 84 self.name = name 85 self.entity_type = entity_type 86 self.metadata = metadata 87 self.salience = salience 88 self.mentions = mentions 89 90 @classmethod 91 def from_api_repr(cls, payload): 92 """Convert an Entity from the JSON API into an :class:`Entity`. 93 94 :param payload: dict 95 :type payload: The value from the backend. 96 97 :rtype: :class:`Entity` 98 :returns: The entity parsed from the API representation. 99 """ 100 name = payload['name'] 101 entity_type = payload['type'] 102 metadata = payload['metadata'] 103 salience = payload['salience'] 104 mentions = [value['text']['content'] 105 for value in payload['mentions']] 106 return cls(name, entity_type, metadata, salience, mentions) 107 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/language/google/cloud/language/entity.py b/language/google/cloud/language/entity.py --- a/language/google/cloud/language/entity.py +++ b/language/google/cloud/language/entity.py @@ -46,6 +46,80 @@ """Other entity type (i.e. known but not classified).""" +class MentionType(object): + """List of possible mention types.""" + + TYPE_UNKNOWN = 'TYPE_UNKNOWN' + """Unknown mention type""" + + PROPER = 'PROPER' + """Proper name""" + + COMMON = 'COMMON' + """Common noun (or noun compound)""" + + +class Mention(object): + """A Google Cloud Natural Language API mention. + + Represents a mention for an entity in the text. Currently, proper noun + mentions are supported. + """ + def __init__(self, text, mention_type): + self.text = text + self.mention_type = mention_type + + def __str__(self): + return str(self.text) + + @classmethod + def from_api_repr(cls, payload): + """Convert a Mention from the JSON API into an :class:`Mention`. + + :param payload: dict + :type payload: The value from the backend. + + :rtype: :class:`Mention` + :returns: The mention parsed from the API representation. + """ + text = TextSpan.from_api_repr(payload['text']) + mention_type = payload['type'] + return cls(text, mention_type) + + +class TextSpan(object): + """A span of text from Google Cloud Natural Language API. + + Represents a word or phrase of text, as well as its offset + from the original document. + """ + def __init__(self, content, begin_offset): + self.content = content + self.begin_offset = begin_offset + + def __str__(self): + """Return the string representation of this TextSpan. + + :rtype: str + :returns: The text content + """ + return self.content + + @classmethod + def from_api_repr(cls, payload): + """Convert a TextSpan from the JSON API into an :class:`TextSpan`. + + :param payload: dict + :type payload: The value from the backend. + + :rtype: :class:`TextSpan` + :returns: The text span parsed from the API representation. + """ + content = payload['content'] + begin_offset = payload['beginOffset'] + return cls(content=content, begin_offset=begin_offset) + + class Entity(object): """A Google Cloud Natural Language API entity. @@ -101,6 +175,5 @@ entity_type = payload['type'] metadata = payload['metadata'] salience = payload['salience'] - mentions = [value['text']['content'] - for value in payload['mentions']] + mentions = [Mention.from_api_repr(val) for val in payload['mentions']] return cls(name, entity_type, metadata, salience, mentions)
{"golden_diff": "diff --git a/language/google/cloud/language/entity.py b/language/google/cloud/language/entity.py\n--- a/language/google/cloud/language/entity.py\n+++ b/language/google/cloud/language/entity.py\n@@ -46,6 +46,80 @@\n \"\"\"Other entity type (i.e. known but not classified).\"\"\"\n \n \n+class MentionType(object):\n+ \"\"\"List of possible mention types.\"\"\"\n+\n+ TYPE_UNKNOWN = 'TYPE_UNKNOWN'\n+ \"\"\"Unknown mention type\"\"\"\n+\n+ PROPER = 'PROPER'\n+ \"\"\"Proper name\"\"\"\n+\n+ COMMON = 'COMMON'\n+ \"\"\"Common noun (or noun compound)\"\"\"\n+\n+\n+class Mention(object):\n+ \"\"\"A Google Cloud Natural Language API mention.\n+\n+ Represents a mention for an entity in the text. Currently, proper noun\n+ mentions are supported.\n+ \"\"\"\n+ def __init__(self, text, mention_type):\n+ self.text = text\n+ self.mention_type = mention_type\n+\n+ def __str__(self):\n+ return str(self.text)\n+\n+ @classmethod\n+ def from_api_repr(cls, payload):\n+ \"\"\"Convert a Mention from the JSON API into an :class:`Mention`.\n+\n+ :param payload: dict\n+ :type payload: The value from the backend.\n+\n+ :rtype: :class:`Mention`\n+ :returns: The mention parsed from the API representation.\n+ \"\"\"\n+ text = TextSpan.from_api_repr(payload['text'])\n+ mention_type = payload['type']\n+ return cls(text, mention_type)\n+\n+\n+class TextSpan(object):\n+ \"\"\"A span of text from Google Cloud Natural Language API.\n+\n+ Represents a word or phrase of text, as well as its offset\n+ from the original document.\n+ \"\"\"\n+ def __init__(self, content, begin_offset):\n+ self.content = content\n+ self.begin_offset = begin_offset\n+\n+ def __str__(self):\n+ \"\"\"Return the string representation of this TextSpan.\n+\n+ :rtype: str\n+ :returns: The text content\n+ \"\"\"\n+ return self.content\n+\n+ @classmethod\n+ def from_api_repr(cls, payload):\n+ \"\"\"Convert a TextSpan from the JSON API into an :class:`TextSpan`.\n+\n+ :param payload: dict\n+ :type payload: The value from the backend.\n+\n+ :rtype: :class:`TextSpan`\n+ :returns: The text span parsed from the API representation.\n+ \"\"\"\n+ content = payload['content']\n+ begin_offset = payload['beginOffset']\n+ return cls(content=content, begin_offset=begin_offset)\n+\n+\n class Entity(object):\n \"\"\"A Google Cloud Natural Language API entity.\n \n@@ -101,6 +175,5 @@\n entity_type = payload['type']\n metadata = payload['metadata']\n salience = payload['salience']\n- mentions = [value['text']['content']\n- for value in payload['mentions']]\n+ mentions = [Mention.from_api_repr(val) for val in payload['mentions']]\n return cls(name, entity_type, metadata, salience, mentions)\n", "issue": "Language: support mention type in Entity.mentions.\n[Currently](https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/language/google/cloud/language/entity.py#L79) the mentions property of an entity is only a list of strings whereas it should be a list of objects containing the mention text and mention type.\r\n\r\nFurthermore, this change should add mention_type information to the mention documentation.\n", "before_files": [{"content": "# Copyright 2016-2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Definition for Google Cloud Natural Language API entities.\n\nAn entity is used to describe a proper name extracted from text.\n\"\"\"\n\n\nclass EntityType(object):\n \"\"\"List of possible entity types.\"\"\"\n\n UNKNOWN = 'UNKNOWN'\n \"\"\"Unknown entity type.\"\"\"\n\n PERSON = 'PERSON'\n \"\"\"Person entity type.\"\"\"\n\n LOCATION = 'LOCATION'\n \"\"\"Location entity type.\"\"\"\n\n ORGANIZATION = 'ORGANIZATION'\n \"\"\"Organization entity type.\"\"\"\n\n EVENT = 'EVENT'\n \"\"\"Event entity type.\"\"\"\n\n WORK_OF_ART = 'WORK_OF_ART'\n \"\"\"Work of art entity type.\"\"\"\n\n CONSUMER_GOOD = 'CONSUMER_GOOD'\n \"\"\"Consumer good entity type.\"\"\"\n\n OTHER = 'OTHER'\n \"\"\"Other entity type (i.e. known but not classified).\"\"\"\n\n\nclass Entity(object):\n \"\"\"A Google Cloud Natural Language API entity.\n\n Represents a phrase in text that is a known entity, such as a person,\n an organization, or location. The API associates information, such as\n salience and mentions, with entities.\n\n .. _Entity message: https://cloud.google.com/natural-language/\\\n reference/rest/v1/Entity\n .. _EntityType enum: https://cloud.google.com/natural-language/\\\n reference/rest/v1/Entity#Type\n\n See `Entity message`_.\n\n :type name: str\n :param name: The name / phrase identified as the entity.\n\n :type entity_type: str\n :param entity_type: The type of the entity. See `EntityType enum`_.\n\n :type metadata: dict\n :param metadata: The metadata associated with the entity.\n Wikipedia URLs and Knowledge Graph MIDs are\n provided, if available. The associated keys are\n \"wikipedia_url\" and \"mid\", respectively.\n\n :type salience: float\n :param salience: The prominence of the entity / phrase within the text\n containing it.\n\n :type mentions: list\n :param mentions: List of strings that mention the entity.\n \"\"\"\n\n def __init__(self, name, entity_type, metadata, salience, mentions):\n self.name = name\n self.entity_type = entity_type\n self.metadata = metadata\n self.salience = salience\n self.mentions = mentions\n\n @classmethod\n def from_api_repr(cls, payload):\n \"\"\"Convert an Entity from the JSON API into an :class:`Entity`.\n\n :param payload: dict\n :type payload: The value from the backend.\n\n :rtype: :class:`Entity`\n :returns: The entity parsed from the API representation.\n \"\"\"\n name = payload['name']\n entity_type = payload['type']\n metadata = payload['metadata']\n salience = payload['salience']\n mentions = [value['text']['content']\n for value in payload['mentions']]\n return cls(name, entity_type, metadata, salience, mentions)\n", "path": "language/google/cloud/language/entity.py"}], "after_files": [{"content": "# Copyright 2016-2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Definition for Google Cloud Natural Language API entities.\n\nAn entity is used to describe a proper name extracted from text.\n\"\"\"\n\n\nclass EntityType(object):\n \"\"\"List of possible entity types.\"\"\"\n\n UNKNOWN = 'UNKNOWN'\n \"\"\"Unknown entity type.\"\"\"\n\n PERSON = 'PERSON'\n \"\"\"Person entity type.\"\"\"\n\n LOCATION = 'LOCATION'\n \"\"\"Location entity type.\"\"\"\n\n ORGANIZATION = 'ORGANIZATION'\n \"\"\"Organization entity type.\"\"\"\n\n EVENT = 'EVENT'\n \"\"\"Event entity type.\"\"\"\n\n WORK_OF_ART = 'WORK_OF_ART'\n \"\"\"Work of art entity type.\"\"\"\n\n CONSUMER_GOOD = 'CONSUMER_GOOD'\n \"\"\"Consumer good entity type.\"\"\"\n\n OTHER = 'OTHER'\n \"\"\"Other entity type (i.e. known but not classified).\"\"\"\n\n\nclass MentionType(object):\n \"\"\"List of possible mention types.\"\"\"\n\n TYPE_UNKNOWN = 'TYPE_UNKNOWN'\n \"\"\"Unknown mention type\"\"\"\n\n PROPER = 'PROPER'\n \"\"\"Proper name\"\"\"\n\n COMMON = 'COMMON'\n \"\"\"Common noun (or noun compound)\"\"\"\n\n\nclass Mention(object):\n \"\"\"A Google Cloud Natural Language API mention.\n\n Represents a mention for an entity in the text. Currently, proper noun\n mentions are supported.\n \"\"\"\n def __init__(self, text, mention_type):\n self.text = text\n self.mention_type = mention_type\n\n def __str__(self):\n return str(self.text)\n\n @classmethod\n def from_api_repr(cls, payload):\n \"\"\"Convert a Mention from the JSON API into an :class:`Mention`.\n\n :param payload: dict\n :type payload: The value from the backend.\n\n :rtype: :class:`Mention`\n :returns: The mention parsed from the API representation.\n \"\"\"\n text = TextSpan.from_api_repr(payload['text'])\n mention_type = payload['type']\n return cls(text, mention_type)\n\n\nclass TextSpan(object):\n \"\"\"A span of text from Google Cloud Natural Language API.\n\n Represents a word or phrase of text, as well as its offset\n from the original document.\n \"\"\"\n def __init__(self, content, begin_offset):\n self.content = content\n self.begin_offset = begin_offset\n\n def __str__(self):\n \"\"\"Return the string representation of this TextSpan.\n\n :rtype: str\n :returns: The text content\n \"\"\"\n return self.content\n\n @classmethod\n def from_api_repr(cls, payload):\n \"\"\"Convert a TextSpan from the JSON API into an :class:`TextSpan`.\n\n :param payload: dict\n :type payload: The value from the backend.\n\n :rtype: :class:`TextSpan`\n :returns: The text span parsed from the API representation.\n \"\"\"\n content = payload['content']\n begin_offset = payload['beginOffset']\n return cls(content=content, begin_offset=begin_offset)\n\n\nclass Entity(object):\n \"\"\"A Google Cloud Natural Language API entity.\n\n Represents a phrase in text that is a known entity, such as a person,\n an organization, or location. The API associates information, such as\n salience and mentions, with entities.\n\n .. _Entity message: https://cloud.google.com/natural-language/\\\n reference/rest/v1/Entity\n .. _EntityType enum: https://cloud.google.com/natural-language/\\\n reference/rest/v1/Entity#Type\n\n See `Entity message`_.\n\n :type name: str\n :param name: The name / phrase identified as the entity.\n\n :type entity_type: str\n :param entity_type: The type of the entity. See `EntityType enum`_.\n\n :type metadata: dict\n :param metadata: The metadata associated with the entity.\n Wikipedia URLs and Knowledge Graph MIDs are\n provided, if available. The associated keys are\n \"wikipedia_url\" and \"mid\", respectively.\n\n :type salience: float\n :param salience: The prominence of the entity / phrase within the text\n containing it.\n\n :type mentions: list\n :param mentions: List of strings that mention the entity.\n \"\"\"\n\n def __init__(self, name, entity_type, metadata, salience, mentions):\n self.name = name\n self.entity_type = entity_type\n self.metadata = metadata\n self.salience = salience\n self.mentions = mentions\n\n @classmethod\n def from_api_repr(cls, payload):\n \"\"\"Convert an Entity from the JSON API into an :class:`Entity`.\n\n :param payload: dict\n :type payload: The value from the backend.\n\n :rtype: :class:`Entity`\n :returns: The entity parsed from the API representation.\n \"\"\"\n name = payload['name']\n entity_type = payload['type']\n metadata = payload['metadata']\n salience = payload['salience']\n mentions = [Mention.from_api_repr(val) for val in payload['mentions']]\n return cls(name, entity_type, metadata, salience, mentions)\n", "path": "language/google/cloud/language/entity.py"}]}
1,321
689
gh_patches_debug_18622
rasdani/github-patches
git_diff
translate__pootle-6643
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Traceback with migration pootle_fs.0002_convert_localfs I hit this traceback when running the new migrations in my local DB, specifically `pootle_fs.0002_convert_localfs`: ```pytb Running migrations: Applying pootle_fs.0002_convert_localfs...Traceback (most recent call last): File "manage.py", line 24, in <module> execute_from_command_line() File "/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/core/management/__init__.py", line 367, in execute_from_command_line utility.execute() File "/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/core/management/__init__.py", line 359, in execute self.fetch_command(subcommand).run_from_argv(self.argv) File "/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/core/management/base.py", line 294, in run_from_argv self.execute(*args, **cmd_options) File "/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/core/management/base.py", line 345, in execute output = self.handle(*args, **options) File "/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/core/management/commands/migrate.py", line 204, in handle fake_initial=fake_initial, File "/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/migrations/executor.py", line 115, in migrate state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial) File "/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/migrations/executor.py", line 145, in _migrate_all_forwards state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial) File "/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/migrations/executor.py", line 244, in apply_migration state = migration.apply(state, schema_editor) File "/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/migrations/migration.py", line 126, in apply operation.database_forwards(self.app_label, schema_editor, old_state, project_state) File "/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/migrations/operations/special.py", line 189, in database_forwards self.code(from_state.apps, schema_editor) File "/home/leo/Escritorio/core_pootle/pootle/apps/pootle_fs/migrations/0002_convert_localfs.py", line 118, in convert_to_localfs filepath = store.file.path[len(proj_trans_path):] File "/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/models/fields/files.py", line 63, in _get_path self._require_file() File "/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/models/fields/files.py", line 46, in _require_file raise ValueError("The '%s' attribute has no file associated with it." % self.field.name) ValueError: The 'file' attribute has no file associated with it. ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pootle/apps/pootle_store/migrations/0033_remove_store_file.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # Generated by Django 1.10.7 on 2017-08-26 14:01 3 from __future__ import unicode_literals 4 5 from django.db import migrations 6 7 8 class Migration(migrations.Migration): 9 10 dependencies = [ 11 ('pootle_store', '0032_fix_empty_wordcounts'), 12 ] 13 14 operations = [ 15 migrations.RemoveField( 16 model_name='store', 17 name='file', 18 ), 19 ] 20 ``` Path: `pootle/apps/pootle_fs/migrations/0002_convert_localfs.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # Generated by Django 1.10.7 on 2017-08-17 09:16 3 from __future__ import unicode_literals 4 5 import logging 6 import os 7 from functools import partial 8 9 import dirsync 10 11 from django.conf import settings 12 from django.db import migrations 13 14 from translate.lang.data import langcode_re 15 16 17 def _file_belongs_to_project(project, filename): 18 ext = os.path.splitext(filename)[1][1:] 19 filetype_extensions = list( 20 project.filetypes.values_list( 21 "extension__name", flat=True)) 22 template_extensions = list( 23 project.filetypes.values_list( 24 "template_extension__name", flat=True)) 25 return ( 26 ext in filetype_extensions 27 or (ext in template_extensions)) 28 29 30 def _detect_treestyle_and_path(project, proj_trans_path): 31 dirlisting = os.walk(proj_trans_path) 32 dirpath_, dirnames, filenames = dirlisting.next() 33 34 if not dirnames: 35 # No subdirectories 36 if filter(partial(_file_belongs_to_project, project), filenames): 37 # Translation files found, assume gnu 38 return "gnu", "" 39 40 # There are subdirectories 41 has_subdirs = filter( 42 (lambda dirname: dirname == 'templates' 43 or langcode_re.match(dirname)), 44 dirnames) 45 if has_subdirs: 46 return "nongnu", None 47 48 # No language subdirs found, look for any translation file 49 # in subdirs 50 for dirpath_, dirnames, filenames in os.walk(proj_trans_path): 51 if filter(partial(_file_belongs_to_project, project), filenames): 52 return "gnu", dirpath_.replace(proj_trans_path, "") 53 # Unsure 54 return "nongnu", None 55 56 57 def _get_translation_mapping(project): 58 old_translation_path = settings.POOTLE_TRANSLATION_DIRECTORY 59 proj_trans_path = os.path.join(old_translation_path, project.code) 60 old_treestyle, old_path = ( 61 _detect_treestyle_and_path(project, proj_trans_path) 62 if project.treestyle == "auto" 63 else (project.treestyle, None)) 64 project.treestyle = "pootle_fs" 65 if old_treestyle == "nongnu": 66 return "/<language_code>/<dir_path>/<filename>.<ext>" 67 else: 68 return "%s/<language_code>.<ext>" % (old_path) 69 70 71 def _set_project_config(Config, project_ct, project): 72 old_translation_path = settings.POOTLE_TRANSLATION_DIRECTORY 73 proj_trans_path = os.path.join(old_translation_path, project.code) 74 configs = Config.objects.filter( 75 content_type=project_ct, 76 object_pk=project.pk) 77 configs.delete() 78 Config.objects.update_or_create( 79 content_type=project_ct, 80 object_pk=project.pk, 81 key="pootle_fs.fs_url", 82 defaults=dict( 83 value=proj_trans_path)) 84 Config.objects.update_or_create( 85 content_type=project_ct, 86 object_pk=project.pk, 87 key="pootle_fs.fs_type", 88 defaults=dict( 89 value="localfs")) 90 Config.objects.update_or_create( 91 content_type=project_ct, 92 object_pk=project.pk, 93 key="pootle_fs.translation_mappings", 94 defaults=dict( 95 value=dict(default=_get_translation_mapping(project)))) 96 97 98 def convert_to_localfs(apps, schema_editor): 99 Project = apps.get_model("pootle_project.Project") 100 Store = apps.get_model("pootle_store.Store") 101 StoreFS = apps.get_model("pootle_fs.StoreFS") 102 Config = apps.get_model("pootle_config.Config") 103 ContentType = apps.get_model("contenttypes.ContentType") 104 project_ct = ContentType.objects.get_for_model(Project) 105 old_translation_path = settings.POOTLE_TRANSLATION_DIRECTORY 106 107 for project in Project.objects.exclude(treestyle="pootle_fs"): 108 proj_trans_path = os.path.join(old_translation_path, project.code) 109 proj_stores = Store.objects.filter( 110 translation_project__project=project) 111 _set_project_config(Config, project_ct, project) 112 project.treestyle = "pootle_fs" 113 project.save() 114 store_fs = StoreFS.objects.filter( 115 store__translation_project__project=project) 116 store_fs.delete() 117 for store in proj_stores: 118 filepath = store.file.path[len(proj_trans_path):] 119 StoreFS.objects.update_or_create( 120 project=project, 121 store=store, 122 defaults=dict( 123 path=filepath, 124 pootle_path=store.pootle_path, 125 last_sync_revision=store.last_sync_revision, 126 last_sync_mtime=store.file_mtime)) 127 fs_temp = os.path.join( 128 settings.POOTLE_FS_WORKING_PATH, project.code) 129 dirsync.sync( 130 proj_trans_path, 131 fs_temp, 132 "sync", 133 create=True, 134 purge=True, 135 logger=logging.getLogger(dirsync.__name__)) 136 137 138 class Migration(migrations.Migration): 139 140 dependencies = [ 141 ('contenttypes', '0002_remove_content_type_name'), 142 ('pootle_fs', '0001_initial'), 143 ('pootle_format', '0003_remove_extra_indeces'), 144 ('pootle_config', '0001_initial'), 145 ('pootle_store', '0013_set_store_filetype_again'), 146 ('pootle_project', '0016_change_treestyle_choices_label'), 147 ] 148 149 operations = [ 150 migrations.RunPython(convert_to_localfs), 151 ] 152 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pootle/apps/pootle_fs/migrations/0002_convert_localfs.py b/pootle/apps/pootle_fs/migrations/0002_convert_localfs.py --- a/pootle/apps/pootle_fs/migrations/0002_convert_localfs.py +++ b/pootle/apps/pootle_fs/migrations/0002_convert_localfs.py @@ -107,7 +107,7 @@ for project in Project.objects.exclude(treestyle="pootle_fs"): proj_trans_path = os.path.join(old_translation_path, project.code) proj_stores = Store.objects.filter( - translation_project__project=project) + translation_project__project=project).exclude(file="") _set_project_config(Config, project_ct, project) project.treestyle = "pootle_fs" project.save() diff --git a/pootle/apps/pootle_store/migrations/0033_remove_store_file.py b/pootle/apps/pootle_store/migrations/0033_remove_store_file.py --- a/pootle/apps/pootle_store/migrations/0033_remove_store_file.py +++ b/pootle/apps/pootle_store/migrations/0033_remove_store_file.py @@ -8,6 +8,7 @@ class Migration(migrations.Migration): dependencies = [ + ('pootle_fs', '0002_convert_localfs'), ('pootle_store', '0032_fix_empty_wordcounts'), ]
{"golden_diff": "diff --git a/pootle/apps/pootle_fs/migrations/0002_convert_localfs.py b/pootle/apps/pootle_fs/migrations/0002_convert_localfs.py\n--- a/pootle/apps/pootle_fs/migrations/0002_convert_localfs.py\n+++ b/pootle/apps/pootle_fs/migrations/0002_convert_localfs.py\n@@ -107,7 +107,7 @@\n for project in Project.objects.exclude(treestyle=\"pootle_fs\"):\n proj_trans_path = os.path.join(old_translation_path, project.code)\n proj_stores = Store.objects.filter(\n- translation_project__project=project)\n+ translation_project__project=project).exclude(file=\"\")\n _set_project_config(Config, project_ct, project)\n project.treestyle = \"pootle_fs\"\n project.save()\ndiff --git a/pootle/apps/pootle_store/migrations/0033_remove_store_file.py b/pootle/apps/pootle_store/migrations/0033_remove_store_file.py\n--- a/pootle/apps/pootle_store/migrations/0033_remove_store_file.py\n+++ b/pootle/apps/pootle_store/migrations/0033_remove_store_file.py\n@@ -8,6 +8,7 @@\n class Migration(migrations.Migration):\n \n dependencies = [\n+ ('pootle_fs', '0002_convert_localfs'),\n ('pootle_store', '0032_fix_empty_wordcounts'),\n ]\n", "issue": "Traceback with migration pootle_fs.0002_convert_localfs\nI hit this traceback when running the new migrations in my local DB, specifically `pootle_fs.0002_convert_localfs`:\r\n\r\n\r\n```pytb\r\nRunning migrations:\r\n Applying pootle_fs.0002_convert_localfs...Traceback (most recent call last):\r\n File \"manage.py\", line 24, in <module>\r\n execute_from_command_line()\r\n File \"/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/core/management/__init__.py\", line 367, in execute_from_command_line\r\n utility.execute()\r\n File \"/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/core/management/__init__.py\", line 359, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/core/management/base.py\", line 294, in run_from_argv\r\n self.execute(*args, **cmd_options)\r\n File \"/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/core/management/base.py\", line 345, in execute\r\n output = self.handle(*args, **options)\r\n File \"/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/core/management/commands/migrate.py\", line 204, in handle\r\n fake_initial=fake_initial,\r\n File \"/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/migrations/executor.py\", line 115, in migrate\r\n state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial)\r\n File \"/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/migrations/executor.py\", line 145, in _migrate_all_forwards\r\n state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)\r\n File \"/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/migrations/executor.py\", line 244, in apply_migration\r\n state = migration.apply(state, schema_editor)\r\n File \"/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/migrations/migration.py\", line 126, in apply\r\n operation.database_forwards(self.app_label, schema_editor, old_state, project_state)\r\n File \"/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/migrations/operations/special.py\", line 189, in database_forwards\r\n self.code(from_state.apps, schema_editor)\r\n File \"/home/leo/Escritorio/core_pootle/pootle/apps/pootle_fs/migrations/0002_convert_localfs.py\", line 118, in convert_to_localfs\r\n filepath = store.file.path[len(proj_trans_path):]\r\n File \"/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/models/fields/files.py\", line 63, in _get_path\r\n self._require_file()\r\n File \"/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/models/fields/files.py\", line 46, in _require_file\r\n raise ValueError(\"The '%s' attribute has no file associated with it.\" % self.field.name)\r\nValueError: The 'file' attribute has no file associated with it.\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.7 on 2017-08-26 14:01\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('pootle_store', '0032_fix_empty_wordcounts'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='store',\n name='file',\n ),\n ]\n", "path": "pootle/apps/pootle_store/migrations/0033_remove_store_file.py"}, {"content": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.7 on 2017-08-17 09:16\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\nfrom functools import partial\n\nimport dirsync\n\nfrom django.conf import settings\nfrom django.db import migrations\n\nfrom translate.lang.data import langcode_re\n\n\ndef _file_belongs_to_project(project, filename):\n ext = os.path.splitext(filename)[1][1:]\n filetype_extensions = list(\n project.filetypes.values_list(\n \"extension__name\", flat=True))\n template_extensions = list(\n project.filetypes.values_list(\n \"template_extension__name\", flat=True))\n return (\n ext in filetype_extensions\n or (ext in template_extensions))\n\n\ndef _detect_treestyle_and_path(project, proj_trans_path):\n dirlisting = os.walk(proj_trans_path)\n dirpath_, dirnames, filenames = dirlisting.next()\n\n if not dirnames:\n # No subdirectories\n if filter(partial(_file_belongs_to_project, project), filenames):\n # Translation files found, assume gnu\n return \"gnu\", \"\"\n\n # There are subdirectories\n has_subdirs = filter(\n (lambda dirname: dirname == 'templates'\n or langcode_re.match(dirname)),\n dirnames)\n if has_subdirs:\n return \"nongnu\", None\n\n # No language subdirs found, look for any translation file\n # in subdirs\n for dirpath_, dirnames, filenames in os.walk(proj_trans_path):\n if filter(partial(_file_belongs_to_project, project), filenames):\n return \"gnu\", dirpath_.replace(proj_trans_path, \"\")\n # Unsure\n return \"nongnu\", None\n\n\ndef _get_translation_mapping(project):\n old_translation_path = settings.POOTLE_TRANSLATION_DIRECTORY\n proj_trans_path = os.path.join(old_translation_path, project.code)\n old_treestyle, old_path = (\n _detect_treestyle_and_path(project, proj_trans_path)\n if project.treestyle == \"auto\"\n else (project.treestyle, None))\n project.treestyle = \"pootle_fs\"\n if old_treestyle == \"nongnu\":\n return \"/<language_code>/<dir_path>/<filename>.<ext>\"\n else:\n return \"%s/<language_code>.<ext>\" % (old_path)\n\n\ndef _set_project_config(Config, project_ct, project):\n old_translation_path = settings.POOTLE_TRANSLATION_DIRECTORY\n proj_trans_path = os.path.join(old_translation_path, project.code)\n configs = Config.objects.filter(\n content_type=project_ct,\n object_pk=project.pk)\n configs.delete()\n Config.objects.update_or_create(\n content_type=project_ct,\n object_pk=project.pk,\n key=\"pootle_fs.fs_url\",\n defaults=dict(\n value=proj_trans_path))\n Config.objects.update_or_create(\n content_type=project_ct,\n object_pk=project.pk,\n key=\"pootle_fs.fs_type\",\n defaults=dict(\n value=\"localfs\"))\n Config.objects.update_or_create(\n content_type=project_ct,\n object_pk=project.pk,\n key=\"pootle_fs.translation_mappings\",\n defaults=dict(\n value=dict(default=_get_translation_mapping(project))))\n\n\ndef convert_to_localfs(apps, schema_editor):\n Project = apps.get_model(\"pootle_project.Project\")\n Store = apps.get_model(\"pootle_store.Store\")\n StoreFS = apps.get_model(\"pootle_fs.StoreFS\")\n Config = apps.get_model(\"pootle_config.Config\")\n ContentType = apps.get_model(\"contenttypes.ContentType\")\n project_ct = ContentType.objects.get_for_model(Project)\n old_translation_path = settings.POOTLE_TRANSLATION_DIRECTORY\n\n for project in Project.objects.exclude(treestyle=\"pootle_fs\"):\n proj_trans_path = os.path.join(old_translation_path, project.code)\n proj_stores = Store.objects.filter(\n translation_project__project=project)\n _set_project_config(Config, project_ct, project)\n project.treestyle = \"pootle_fs\"\n project.save()\n store_fs = StoreFS.objects.filter(\n store__translation_project__project=project)\n store_fs.delete()\n for store in proj_stores:\n filepath = store.file.path[len(proj_trans_path):]\n StoreFS.objects.update_or_create(\n project=project,\n store=store,\n defaults=dict(\n path=filepath,\n pootle_path=store.pootle_path,\n last_sync_revision=store.last_sync_revision,\n last_sync_mtime=store.file_mtime))\n fs_temp = os.path.join(\n settings.POOTLE_FS_WORKING_PATH, project.code)\n dirsync.sync(\n proj_trans_path,\n fs_temp,\n \"sync\",\n create=True,\n purge=True,\n logger=logging.getLogger(dirsync.__name__))\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('contenttypes', '0002_remove_content_type_name'),\n ('pootle_fs', '0001_initial'),\n ('pootle_format', '0003_remove_extra_indeces'),\n ('pootle_config', '0001_initial'),\n ('pootle_store', '0013_set_store_filetype_again'),\n ('pootle_project', '0016_change_treestyle_choices_label'),\n ]\n\n operations = [\n migrations.RunPython(convert_to_localfs),\n ]\n", "path": "pootle/apps/pootle_fs/migrations/0002_convert_localfs.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.7 on 2017-08-26 14:01\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('pootle_fs', '0002_convert_localfs'),\n ('pootle_store', '0032_fix_empty_wordcounts'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='store',\n name='file',\n ),\n ]\n", "path": "pootle/apps/pootle_store/migrations/0033_remove_store_file.py"}, {"content": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.7 on 2017-08-17 09:16\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\nfrom functools import partial\n\nimport dirsync\n\nfrom django.conf import settings\nfrom django.db import migrations\n\nfrom translate.lang.data import langcode_re\n\n\ndef _file_belongs_to_project(project, filename):\n ext = os.path.splitext(filename)[1][1:]\n filetype_extensions = list(\n project.filetypes.values_list(\n \"extension__name\", flat=True))\n template_extensions = list(\n project.filetypes.values_list(\n \"template_extension__name\", flat=True))\n return (\n ext in filetype_extensions\n or (ext in template_extensions))\n\n\ndef _detect_treestyle_and_path(project, proj_trans_path):\n dirlisting = os.walk(proj_trans_path)\n dirpath_, dirnames, filenames = dirlisting.next()\n\n if not dirnames:\n # No subdirectories\n if filter(partial(_file_belongs_to_project, project), filenames):\n # Translation files found, assume gnu\n return \"gnu\", \"\"\n\n # There are subdirectories\n has_subdirs = filter(\n (lambda dirname: dirname == 'templates'\n or langcode_re.match(dirname)),\n dirnames)\n if has_subdirs:\n return \"nongnu\", None\n\n # No language subdirs found, look for any translation file\n # in subdirs\n for dirpath_, dirnames, filenames in os.walk(proj_trans_path):\n if filter(partial(_file_belongs_to_project, project), filenames):\n return \"gnu\", dirpath_.replace(proj_trans_path, \"\")\n # Unsure\n return \"nongnu\", None\n\n\ndef _get_translation_mapping(project):\n old_translation_path = settings.POOTLE_TRANSLATION_DIRECTORY\n proj_trans_path = os.path.join(old_translation_path, project.code)\n old_treestyle, old_path = (\n _detect_treestyle_and_path(project, proj_trans_path)\n if project.treestyle == \"auto\"\n else (project.treestyle, None))\n project.treestyle = \"pootle_fs\"\n if old_treestyle == \"nongnu\":\n return \"/<language_code>/<dir_path>/<filename>.<ext>\"\n else:\n return \"%s/<language_code>.<ext>\" % (old_path)\n\n\ndef _set_project_config(Config, project_ct, project):\n old_translation_path = settings.POOTLE_TRANSLATION_DIRECTORY\n proj_trans_path = os.path.join(old_translation_path, project.code)\n configs = Config.objects.filter(\n content_type=project_ct,\n object_pk=project.pk)\n configs.delete()\n Config.objects.update_or_create(\n content_type=project_ct,\n object_pk=project.pk,\n key=\"pootle_fs.fs_url\",\n defaults=dict(\n value=proj_trans_path))\n Config.objects.update_or_create(\n content_type=project_ct,\n object_pk=project.pk,\n key=\"pootle_fs.fs_type\",\n defaults=dict(\n value=\"localfs\"))\n Config.objects.update_or_create(\n content_type=project_ct,\n object_pk=project.pk,\n key=\"pootle_fs.translation_mappings\",\n defaults=dict(\n value=dict(default=_get_translation_mapping(project))))\n\n\ndef convert_to_localfs(apps, schema_editor):\n Project = apps.get_model(\"pootle_project.Project\")\n Store = apps.get_model(\"pootle_store.Store\")\n StoreFS = apps.get_model(\"pootle_fs.StoreFS\")\n Config = apps.get_model(\"pootle_config.Config\")\n ContentType = apps.get_model(\"contenttypes.ContentType\")\n project_ct = ContentType.objects.get_for_model(Project)\n old_translation_path = settings.POOTLE_TRANSLATION_DIRECTORY\n\n for project in Project.objects.exclude(treestyle=\"pootle_fs\"):\n proj_trans_path = os.path.join(old_translation_path, project.code)\n proj_stores = Store.objects.filter(\n translation_project__project=project).exclude(file=\"\")\n _set_project_config(Config, project_ct, project)\n project.treestyle = \"pootle_fs\"\n project.save()\n store_fs = StoreFS.objects.filter(\n store__translation_project__project=project)\n store_fs.delete()\n for store in proj_stores:\n filepath = store.file.path[len(proj_trans_path):]\n StoreFS.objects.update_or_create(\n project=project,\n store=store,\n defaults=dict(\n path=filepath,\n pootle_path=store.pootle_path,\n last_sync_revision=store.last_sync_revision,\n last_sync_mtime=store.file_mtime))\n fs_temp = os.path.join(\n settings.POOTLE_FS_WORKING_PATH, project.code)\n dirsync.sync(\n proj_trans_path,\n fs_temp,\n \"sync\",\n create=True,\n purge=True,\n logger=logging.getLogger(dirsync.__name__))\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('contenttypes', '0002_remove_content_type_name'),\n ('pootle_fs', '0001_initial'),\n ('pootle_format', '0003_remove_extra_indeces'),\n ('pootle_config', '0001_initial'),\n ('pootle_store', '0013_set_store_filetype_again'),\n ('pootle_project', '0016_change_treestyle_choices_label'),\n ]\n\n operations = [\n migrations.RunPython(convert_to_localfs),\n ]\n", "path": "pootle/apps/pootle_fs/migrations/0002_convert_localfs.py"}]}
2,800
337
gh_patches_debug_1416
rasdani/github-patches
git_diff
frappe__frappe-4871
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `Insufficient Permission for Chat Profile` on load Use Case: 1. Create New User without System Manager Role 2. Log-in to System 3. Error Pops-up when system is trying to create Chat Profile. I tried replicating it to a user that was once has a System Manager role and it was not popping up. It was because the user has already Chat Profile. Here is the log. Hope this helps! ![profile](https://user-images.githubusercontent.com/21003054/35091472-ba84aa68-fc77-11e7-860e-789961c1a09e.png) Any help is greatly appreciated. I will also try to create an issue for this here in discuss. Thanks, Dori --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `frappe/chat/doctype/chat_profile/chat_profile.py` Content: ``` 1 # imports - module imports 2 from frappe.model.document import Document 3 from frappe import _ 4 import frappe 5 6 # imports - frappe module imports 7 from frappe.core.doctype.version.version import get_diff 8 from frappe.chat.doctype.chat_room import chat_room 9 from frappe.chat.util import ( 10 safe_json_loads, 11 filter_dict, 12 dictify 13 ) 14 15 session = frappe.session 16 17 class ChatProfile(Document): 18 def before_save(self): 19 if not self.is_new(): 20 self.get_doc_before_save() 21 22 def on_update(self): 23 if not self.is_new(): 24 b, a = self.get_doc_before_save(), self 25 diff = dictify(get_diff(a, b)) 26 if diff: 27 user = session.user 28 29 fields = [changed[0] for changed in diff.changed] 30 31 if 'status' in fields: 32 rooms = chat_room.get(user, filters = ['Chat Room', 'type', '=', 'Direct']) 33 update = dict(user = user, data = dict(status = self.status)) 34 35 for room in rooms: 36 frappe.publish_realtime('frappe.chat.profile:update', update, room = room.name, after_commit = True) 37 38 if 'enable_chat' in fields: 39 update = dict(user = user, data = dict(enable_chat = bool(self.enable_chat))) 40 frappe.publish_realtime('frappe.chat.profile:update', update, user = user, after_commit = True) 41 42 def authenticate(user): 43 if user != session.user: 44 frappe.throw(_("Sorry, you're not authorized.")) 45 46 @frappe.whitelist() 47 def get(user, fields = None): 48 duser = frappe.get_doc('User', user) 49 dprof = frappe.get_doc('Chat Profile', user) 50 51 # If you're adding something here, make sure the client recieves it. 52 profile = dict( 53 # User 54 name = duser.name, 55 email = duser.email, 56 first_name = duser.first_name, 57 last_name = duser.last_name, 58 username = duser.username, 59 avatar = duser.user_image, 60 bio = duser.bio, 61 # Chat Profile 62 status = dprof.status, 63 chat_background = dprof.chat_background, 64 message_preview = bool(dprof.message_preview), 65 notification_tones = bool(dprof.notification_tones), 66 conversation_tones = bool(dprof.conversation_tones), 67 enable_chat = bool(dprof.enable_chat) 68 ) 69 profile = filter_dict(profile, fields) 70 71 return dictify(profile) 72 73 @frappe.whitelist() 74 def create(user, exists_ok = False, fields = None): 75 authenticate(user) 76 77 exists_ok, fields = safe_json_loads(exists_ok, fields) 78 79 if frappe.db.exists('Chat Profile', user): 80 if not exists_ok: 81 frappe.throw(_('Chat Profile for User {user} exists.'.format(user = user))) 82 else: 83 dprof = frappe.new_doc('Chat Profile') 84 dprof.user = user 85 dprof.save() 86 87 profile = get(user, fields = fields) 88 89 return profile 90 91 @frappe.whitelist() 92 def update(user, data): 93 authenticate(user) 94 95 data = safe_json_loads(data) 96 97 dprof = frappe.get_doc('Chat Profile', user) 98 dprof.update(data) 99 dprof.save(ignore_permissions = True) ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/frappe/chat/doctype/chat_profile/chat_profile.py b/frappe/chat/doctype/chat_profile/chat_profile.py --- a/frappe/chat/doctype/chat_profile/chat_profile.py +++ b/frappe/chat/doctype/chat_profile/chat_profile.py @@ -82,7 +82,7 @@ else: dprof = frappe.new_doc('Chat Profile') dprof.user = user - dprof.save() + dprof.save(ignore_permissions = True) profile = get(user, fields = fields)
{"golden_diff": "diff --git a/frappe/chat/doctype/chat_profile/chat_profile.py b/frappe/chat/doctype/chat_profile/chat_profile.py\n--- a/frappe/chat/doctype/chat_profile/chat_profile.py\n+++ b/frappe/chat/doctype/chat_profile/chat_profile.py\n@@ -82,7 +82,7 @@\n else:\n dprof = frappe.new_doc('Chat Profile')\n dprof.user = user\n- dprof.save()\n+ dprof.save(ignore_permissions = True)\n \n profile = get(user, fields = fields)\n", "issue": "`Insufficient Permission for Chat Profile` on load\nUse Case:\r\n\r\n1. Create New User without System Manager Role\r\n2. Log-in to System\r\n3. Error Pops-up when system is trying to create Chat Profile.\r\n\r\nI tried replicating it to a user that was once has a System Manager role and it was not popping up. It was because the user has already Chat Profile.\r\n\r\nHere is the log. Hope this helps!\r\n\r\n![profile](https://user-images.githubusercontent.com/21003054/35091472-ba84aa68-fc77-11e7-860e-789961c1a09e.png)\r\n\r\n\r\nAny help is greatly appreciated. I will also try to create an issue for this here in discuss.\r\n\r\nThanks,\r\n\r\nDori\n", "before_files": [{"content": "# imports - module imports\nfrom frappe.model.document import Document\nfrom frappe import _\nimport frappe\n\n# imports - frappe module imports\nfrom frappe.core.doctype.version.version import get_diff\nfrom frappe.chat.doctype.chat_room import chat_room\nfrom frappe.chat.util import (\n safe_json_loads,\n filter_dict,\n dictify\n)\n\nsession = frappe.session\n\nclass ChatProfile(Document):\n def before_save(self):\n if not self.is_new():\n self.get_doc_before_save()\n\n def on_update(self):\n if not self.is_new():\n b, a = self.get_doc_before_save(), self\n diff = dictify(get_diff(a, b))\n if diff:\n user = session.user\n\n fields = [changed[0] for changed in diff.changed]\n\n if 'status' in fields:\n rooms = chat_room.get(user, filters = ['Chat Room', 'type', '=', 'Direct'])\n update = dict(user = user, data = dict(status = self.status))\n\n for room in rooms:\n frappe.publish_realtime('frappe.chat.profile:update', update, room = room.name, after_commit = True)\n\n if 'enable_chat' in fields:\n update = dict(user = user, data = dict(enable_chat = bool(self.enable_chat)))\n frappe.publish_realtime('frappe.chat.profile:update', update, user = user, after_commit = True)\n\ndef authenticate(user):\n if user != session.user:\n frappe.throw(_(\"Sorry, you're not authorized.\"))\n\[email protected]()\ndef get(user, fields = None):\n duser = frappe.get_doc('User', user)\n dprof = frappe.get_doc('Chat Profile', user)\n\n # If you're adding something here, make sure the client recieves it.\n profile = dict(\n # User\n name = duser.name,\n email = duser.email,\n first_name = duser.first_name,\n last_name = duser.last_name,\n username = duser.username,\n avatar = duser.user_image,\n bio = duser.bio,\n # Chat Profile\n status = dprof.status,\n chat_background = dprof.chat_background,\n message_preview = bool(dprof.message_preview),\n notification_tones = bool(dprof.notification_tones),\n conversation_tones = bool(dprof.conversation_tones),\n enable_chat = bool(dprof.enable_chat)\n )\n profile = filter_dict(profile, fields)\n\n return dictify(profile)\n\[email protected]()\ndef create(user, exists_ok = False, fields = None):\n authenticate(user)\n\n exists_ok, fields = safe_json_loads(exists_ok, fields)\n\n if frappe.db.exists('Chat Profile', user):\n if not exists_ok:\n frappe.throw(_('Chat Profile for User {user} exists.'.format(user = user)))\n else:\n dprof = frappe.new_doc('Chat Profile')\n dprof.user = user\n dprof.save()\n\n profile = get(user, fields = fields)\n\n return profile\n\[email protected]()\ndef update(user, data):\n authenticate(user)\n\n data = safe_json_loads(data)\n\n dprof = frappe.get_doc('Chat Profile', user)\n dprof.update(data)\n dprof.save(ignore_permissions = True)", "path": "frappe/chat/doctype/chat_profile/chat_profile.py"}], "after_files": [{"content": "# imports - module imports\nfrom frappe.model.document import Document\nfrom frappe import _\nimport frappe\n\n# imports - frappe module imports\nfrom frappe.core.doctype.version.version import get_diff\nfrom frappe.chat.doctype.chat_room import chat_room\nfrom frappe.chat.util import (\n safe_json_loads,\n filter_dict,\n dictify\n)\n\nsession = frappe.session\n\nclass ChatProfile(Document):\n def before_save(self):\n if not self.is_new():\n self.get_doc_before_save()\n\n def on_update(self):\n if not self.is_new():\n b, a = self.get_doc_before_save(), self\n diff = dictify(get_diff(a, b))\n if diff:\n user = session.user\n\n fields = [changed[0] for changed in diff.changed]\n\n if 'status' in fields:\n rooms = chat_room.get(user, filters = ['Chat Room', 'type', '=', 'Direct'])\n update = dict(user = user, data = dict(status = self.status))\n\n for room in rooms:\n frappe.publish_realtime('frappe.chat.profile:update', update, room = room.name, after_commit = True)\n\n if 'enable_chat' in fields:\n update = dict(user = user, data = dict(enable_chat = bool(self.enable_chat)))\n frappe.publish_realtime('frappe.chat.profile:update', update, user = user, after_commit = True)\n\ndef authenticate(user):\n if user != session.user:\n frappe.throw(_(\"Sorry, you're not authorized.\"))\n\[email protected]()\ndef get(user, fields = None):\n duser = frappe.get_doc('User', user)\n dprof = frappe.get_doc('Chat Profile', user)\n\n # If you're adding something here, make sure the client recieves it.\n profile = dict(\n # User\n name = duser.name,\n email = duser.email,\n first_name = duser.first_name,\n last_name = duser.last_name,\n username = duser.username,\n avatar = duser.user_image,\n bio = duser.bio,\n # Chat Profile\n status = dprof.status,\n chat_background = dprof.chat_background,\n message_preview = bool(dprof.message_preview),\n notification_tones = bool(dprof.notification_tones),\n conversation_tones = bool(dprof.conversation_tones),\n enable_chat = bool(dprof.enable_chat)\n )\n profile = filter_dict(profile, fields)\n\n return dictify(profile)\n\[email protected]()\ndef create(user, exists_ok = False, fields = None):\n authenticate(user)\n\n exists_ok, fields = safe_json_loads(exists_ok, fields)\n\n if frappe.db.exists('Chat Profile', user):\n if not exists_ok:\n frappe.throw(_('Chat Profile for User {user} exists.'.format(user = user)))\n else:\n dprof = frappe.new_doc('Chat Profile')\n dprof.user = user\n dprof.save(ignore_permissions = True)\n\n profile = get(user, fields = fields)\n\n return profile\n\[email protected]()\ndef update(user, data):\n authenticate(user)\n\n data = safe_json_loads(data)\n\n dprof = frappe.get_doc('Chat Profile', user)\n dprof.update(data)\n dprof.save(ignore_permissions = True)", "path": "frappe/chat/doctype/chat_profile/chat_profile.py"}]}
1,367
114
gh_patches_debug_23168
rasdani/github-patches
git_diff
feast-dev__feast-983
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Go SDK: go get by Version Tag Does Not Work ## Problem & Expected Behavour `go get` to install the Feast Go SDK should work by version tag: ``` go get github.com/feast-dev/feast/sdk/[email protected] ``` Returns: ``` go get github.com/feast-dev/feast/sdk/[email protected]: module github.com/feast-dev/[email protected] found, but does not contain package github.com/feast-dev/feast/sdk/go ``` Instead of installing the Go SDK at release version tag v0.5.1. ## Proposed Solution - Update [setup.py](https://github.com/feast-dev/feast/blob/89883d418b4935d595585689d63d246ad133cabe/sdk/python/setup.py#L86) to support non semver tags. (ie `sdk/go/v0.5.1`) - Introduce additional tag `sdk/go/v<VERSION>` each release. This tag will allow us to pull the go module by a versioned tag. [Reference](https://github.com/golang/go/wiki/Modules#faqs--multi-module-repositories) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sdk/python/setup.py` Content: ``` 1 # Copyright 2019 The Feast Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import os 16 import subprocess 17 18 from setuptools import find_packages, setup 19 20 NAME = "feast" 21 DESCRIPTION = "Python SDK for Feast" 22 URL = "https://github.com/feast-dev/feast" 23 AUTHOR = "Feast" 24 REQUIRES_PYTHON = ">=3.6.0" 25 26 REQUIRED = [ 27 "Click==7.*", 28 "google-api-core==1.20.*", 29 "google-auth<2.0dev,>=1.14.0", 30 "google-cloud-bigquery==1.18.*", 31 "google-cloud-storage==1.20.*", 32 "google-cloud-core==1.0.*", 33 "googleapis-common-protos==1.*", 34 "google-cloud-bigquery-storage==0.7.*", 35 "grpcio==1.*", 36 "pandas~=1.0.0", 37 "pandavro==1.5.*", 38 "protobuf>=3.10", 39 "PyYAML==5.1.*", 40 "fastavro>=0.22.11,<0.23", 41 "kafka-python==1.*", 42 "tabulate==0.8.*", 43 "toml==0.10.*", 44 "tqdm==4.*", 45 "pyarrow<0.16.0,>=0.15.1", 46 "numpy", 47 "google", 48 "confluent_kafka", 49 ] 50 51 # README file from Feast repo root directory 52 repo_root = ( 53 subprocess.Popen(["git", "rev-parse", "--show-toplevel"], stdout=subprocess.PIPE) 54 .communicate()[0] 55 .rstrip() 56 .decode("utf-8") 57 ) 58 README_FILE = os.path.join(repo_root, "README.md") 59 with open(os.path.join(README_FILE), "r") as f: 60 LONG_DESCRIPTION = f.read() 61 62 setup( 63 name=NAME, 64 author=AUTHOR, 65 description=DESCRIPTION, 66 long_description=LONG_DESCRIPTION, 67 long_description_content_type="text/markdown", 68 python_requires=REQUIRES_PYTHON, 69 url=URL, 70 packages=find_packages(exclude=("tests",)), 71 install_requires=REQUIRED, 72 # https://stackoverflow.com/questions/28509965/setuptools-development-requirements 73 # Install dev requirements with: pip install -e .[dev] 74 extras_require={"dev": ["mypy-protobuf==1.*", "grpcio-testing==1.*"]}, 75 include_package_data=True, 76 license="Apache", 77 classifiers=[ 78 # Trove classifiers 79 # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers 80 "License :: OSI Approved :: Apache Software License", 81 "Programming Language :: Python", 82 "Programming Language :: Python :: 3", 83 "Programming Language :: Python :: 3.6", 84 ], 85 entry_points={"console_scripts": ["feast=feast.cli:cli"]}, 86 use_scm_version={"root": "../..", "relative_to": __file__}, 87 setup_requires=["setuptools_scm"], 88 ) 89 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sdk/python/setup.py b/sdk/python/setup.py --- a/sdk/python/setup.py +++ b/sdk/python/setup.py @@ -13,6 +13,7 @@ # limitations under the License. import os +import re import subprocess from setuptools import find_packages, setup @@ -59,6 +60,13 @@ with open(os.path.join(README_FILE), "r") as f: LONG_DESCRIPTION = f.read() +# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm. +# Regex modified from default tag regex in: +# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9 +TAG_REGEX = re.compile( + r"^(?:[\/\w-]+)?(?P<version>[vV]?\d+(?:\.\d+){0,2}[^\+]*)(?:\+.*)?$" +) + setup( name=NAME, author=AUTHOR, @@ -83,6 +91,6 @@ "Programming Language :: Python :: 3.6", ], entry_points={"console_scripts": ["feast=feast.cli:cli"]}, - use_scm_version={"root": "../..", "relative_to": __file__}, + use_scm_version={"root": "../..", "relative_to": __file__, "tag_regex": TAG_REGEX}, setup_requires=["setuptools_scm"], )
{"golden_diff": "diff --git a/sdk/python/setup.py b/sdk/python/setup.py\n--- a/sdk/python/setup.py\n+++ b/sdk/python/setup.py\n@@ -13,6 +13,7 @@\n # limitations under the License.\n \n import os\n+import re\n import subprocess\n \n from setuptools import find_packages, setup\n@@ -59,6 +60,13 @@\n with open(os.path.join(README_FILE), \"r\") as f:\n LONG_DESCRIPTION = f.read()\n \n+# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.\n+# Regex modified from default tag regex in:\n+# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9\n+TAG_REGEX = re.compile(\n+ r\"^(?:[\\/\\w-]+)?(?P<version>[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$\"\n+)\n+\n setup(\n name=NAME,\n author=AUTHOR,\n@@ -83,6 +91,6 @@\n \"Programming Language :: Python :: 3.6\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n- use_scm_version={\"root\": \"../..\", \"relative_to\": __file__},\n+ use_scm_version={\"root\": \"../..\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX},\n setup_requires=[\"setuptools_scm\"],\n )\n", "issue": "Go SDK: go get by Version Tag Does Not Work\n## Problem & Expected Behavour\r\n`go get` to install the Feast Go SDK should work by version tag:\r\n```\r\ngo get github.com/feast-dev/feast/sdk/[email protected]\r\n```\r\nReturns: \r\n```\r\ngo get github.com/feast-dev/feast/sdk/[email protected]: module github.com/feast-dev/[email protected] found, but does not contain package github.com/feast-dev/feast/sdk/go\r\n```\r\nInstead of installing the Go SDK at release version tag v0.5.1.\r\n\r\n## Proposed Solution\r\n- Update [setup.py](https://github.com/feast-dev/feast/blob/89883d418b4935d595585689d63d246ad133cabe/sdk/python/setup.py#L86) to support non semver tags. (ie `sdk/go/v0.5.1`)\r\n- Introduce additional tag `sdk/go/v<VERSION>` each release. This tag will allow us to pull the go module by a versioned tag. [Reference](https://github.com/golang/go/wiki/Modules#faqs--multi-module-repositories)\n", "before_files": [{"content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport subprocess\n\nfrom setuptools import find_packages, setup\n\nNAME = \"feast\"\nDESCRIPTION = \"Python SDK for Feast\"\nURL = \"https://github.com/feast-dev/feast\"\nAUTHOR = \"Feast\"\nREQUIRES_PYTHON = \">=3.6.0\"\n\nREQUIRED = [\n \"Click==7.*\",\n \"google-api-core==1.20.*\",\n \"google-auth<2.0dev,>=1.14.0\",\n \"google-cloud-bigquery==1.18.*\",\n \"google-cloud-storage==1.20.*\",\n \"google-cloud-core==1.0.*\",\n \"googleapis-common-protos==1.*\",\n \"google-cloud-bigquery-storage==0.7.*\",\n \"grpcio==1.*\",\n \"pandas~=1.0.0\",\n \"pandavro==1.5.*\",\n \"protobuf>=3.10\",\n \"PyYAML==5.1.*\",\n \"fastavro>=0.22.11,<0.23\",\n \"kafka-python==1.*\",\n \"tabulate==0.8.*\",\n \"toml==0.10.*\",\n \"tqdm==4.*\",\n \"pyarrow<0.16.0,>=0.15.1\",\n \"numpy\",\n \"google\",\n \"confluent_kafka\",\n]\n\n# README file from Feast repo root directory\nrepo_root = (\n subprocess.Popen([\"git\", \"rev-parse\", \"--show-toplevel\"], stdout=subprocess.PIPE)\n .communicate()[0]\n .rstrip()\n .decode(\"utf-8\")\n)\nREADME_FILE = os.path.join(repo_root, \"README.md\")\nwith open(os.path.join(README_FILE), \"r\") as f:\n LONG_DESCRIPTION = f.read()\n\nsetup(\n name=NAME,\n author=AUTHOR,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(exclude=(\"tests\",)),\n install_requires=REQUIRED,\n # https://stackoverflow.com/questions/28509965/setuptools-development-requirements\n # Install dev requirements with: pip install -e .[dev]\n extras_require={\"dev\": [\"mypy-protobuf==1.*\", \"grpcio-testing==1.*\"]},\n include_package_data=True,\n license=\"Apache\",\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version={\"root\": \"../..\", \"relative_to\": __file__},\n setup_requires=[\"setuptools_scm\"],\n)\n", "path": "sdk/python/setup.py"}], "after_files": [{"content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\nimport subprocess\n\nfrom setuptools import find_packages, setup\n\nNAME = \"feast\"\nDESCRIPTION = \"Python SDK for Feast\"\nURL = \"https://github.com/feast-dev/feast\"\nAUTHOR = \"Feast\"\nREQUIRES_PYTHON = \">=3.6.0\"\n\nREQUIRED = [\n \"Click==7.*\",\n \"google-api-core==1.20.*\",\n \"google-auth<2.0dev,>=1.14.0\",\n \"google-cloud-bigquery==1.18.*\",\n \"google-cloud-storage==1.20.*\",\n \"google-cloud-core==1.0.*\",\n \"googleapis-common-protos==1.*\",\n \"google-cloud-bigquery-storage==0.7.*\",\n \"grpcio==1.*\",\n \"pandas~=1.0.0\",\n \"pandavro==1.5.*\",\n \"protobuf>=3.10\",\n \"PyYAML==5.1.*\",\n \"fastavro>=0.22.11,<0.23\",\n \"kafka-python==1.*\",\n \"tabulate==0.8.*\",\n \"toml==0.10.*\",\n \"tqdm==4.*\",\n \"pyarrow<0.16.0,>=0.15.1\",\n \"numpy\",\n \"google\",\n \"confluent_kafka\",\n]\n\n# README file from Feast repo root directory\nrepo_root = (\n subprocess.Popen([\"git\", \"rev-parse\", \"--show-toplevel\"], stdout=subprocess.PIPE)\n .communicate()[0]\n .rstrip()\n .decode(\"utf-8\")\n)\nREADME_FILE = os.path.join(repo_root, \"README.md\")\nwith open(os.path.join(README_FILE), \"r\") as f:\n LONG_DESCRIPTION = f.read()\n\n# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.\n# Regex modified from default tag regex in:\n# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9\nTAG_REGEX = re.compile(\n r\"^(?:[\\/\\w-]+)?(?P<version>[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$\"\n)\n\nsetup(\n name=NAME,\n author=AUTHOR,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(exclude=(\"tests\",)),\n install_requires=REQUIRED,\n # https://stackoverflow.com/questions/28509965/setuptools-development-requirements\n # Install dev requirements with: pip install -e .[dev]\n extras_require={\"dev\": [\"mypy-protobuf==1.*\", \"grpcio-testing==1.*\"]},\n include_package_data=True,\n license=\"Apache\",\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version={\"root\": \"../..\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX},\n setup_requires=[\"setuptools_scm\"],\n)\n", "path": "sdk/python/setup.py"}]}
1,500
358
gh_patches_debug_34656
rasdani/github-patches
git_diff
ytdl-org__youtube-dl-3954
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [8tracks] throws 403 error after first 4 songs. Nothing downloads. 8tracks recently updated their API to version 3. They probably updated their security on this (because, according to their license, you can only skip 3 songs within a certain time frame). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `youtube_dl/extractor/eighttracks.py` Content: ``` 1 # coding: utf-8 2 from __future__ import unicode_literals 3 4 import json 5 import random 6 import re 7 8 from .common import InfoExtractor 9 from ..utils import ( 10 compat_str, 11 ) 12 13 14 class EightTracksIE(InfoExtractor): 15 IE_NAME = '8tracks' 16 _VALID_URL = r'https?://8tracks\.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$' 17 _TEST = { 18 "name": "EightTracks", 19 "url": "http://8tracks.com/ytdl/youtube-dl-test-tracks-a", 20 "info_dict": { 21 'id': '1336550', 22 'display_id': 'youtube-dl-test-tracks-a', 23 "description": "test chars: \"'/\\ä↭", 24 "title": "youtube-dl test tracks \"'/\\ä↭<>", 25 }, 26 "playlist": [ 27 { 28 "md5": "96ce57f24389fc8734ce47f4c1abcc55", 29 "info_dict": { 30 "id": "11885610", 31 "ext": "m4a", 32 "title": "youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad", 33 "uploader_id": "ytdl" 34 } 35 }, 36 { 37 "md5": "4ab26f05c1f7291ea460a3920be8021f", 38 "info_dict": { 39 "id": "11885608", 40 "ext": "m4a", 41 "title": "youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad", 42 "uploader_id": "ytdl" 43 } 44 }, 45 { 46 "md5": "d30b5b5f74217410f4689605c35d1fd7", 47 "info_dict": { 48 "id": "11885679", 49 "ext": "m4a", 50 "title": "youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad", 51 "uploader_id": "ytdl" 52 } 53 }, 54 { 55 "md5": "4eb0a669317cd725f6bbd336a29f923a", 56 "info_dict": { 57 "id": "11885680", 58 "ext": "m4a", 59 "title": "youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad", 60 "uploader_id": "ytdl" 61 } 62 }, 63 { 64 "md5": "1893e872e263a2705558d1d319ad19e8", 65 "info_dict": { 66 "id": "11885682", 67 "ext": "m4a", 68 "title": "PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad", 69 "uploader_id": "ytdl" 70 } 71 }, 72 { 73 "md5": "b673c46f47a216ab1741ae8836af5899", 74 "info_dict": { 75 "id": "11885683", 76 "ext": "m4a", 77 "title": "PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad", 78 "uploader_id": "ytdl" 79 } 80 }, 81 { 82 "md5": "1d74534e95df54986da7f5abf7d842b7", 83 "info_dict": { 84 "id": "11885684", 85 "ext": "m4a", 86 "title": "phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad", 87 "uploader_id": "ytdl" 88 } 89 }, 90 { 91 "md5": "f081f47af8f6ae782ed131d38b9cd1c0", 92 "info_dict": { 93 "id": "11885685", 94 "ext": "m4a", 95 "title": "phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad", 96 "uploader_id": "ytdl" 97 } 98 } 99 ] 100 } 101 102 def _real_extract(self, url): 103 mobj = re.match(self._VALID_URL, url) 104 playlist_id = mobj.group('id') 105 106 webpage = self._download_webpage(url, playlist_id) 107 108 json_like = self._search_regex( 109 r"(?s)PAGE.mix = (.*?);\n", webpage, 'trax information') 110 data = json.loads(json_like) 111 112 session = str(random.randint(0, 1000000000)) 113 mix_id = data['id'] 114 track_count = data['tracks_count'] 115 first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id) 116 next_url = first_url 117 entries = [] 118 for i in range(track_count): 119 api_json = self._download_webpage( 120 next_url, playlist_id, 121 note='Downloading song information %d/%d' % (i + 1, track_count), 122 errnote='Failed to download song information') 123 api_data = json.loads(api_json) 124 track_data = api_data['set']['track'] 125 info = { 126 'id': compat_str(track_data['id']), 127 'url': track_data['track_file_stream_url'], 128 'title': track_data['performer'] + u' - ' + track_data['name'], 129 'raw_title': track_data['name'], 130 'uploader_id': data['user']['login'], 131 'ext': 'm4a', 132 } 133 entries.append(info) 134 next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % ( 135 session, mix_id, track_data['id']) 136 return { 137 '_type': 'playlist', 138 'entries': entries, 139 'id': compat_str(mix_id), 140 'display_id': playlist_id, 141 'title': data.get('name'), 142 'description': data.get('description'), 143 } 144 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/youtube_dl/extractor/eighttracks.py b/youtube_dl/extractor/eighttracks.py --- a/youtube_dl/extractor/eighttracks.py +++ b/youtube_dl/extractor/eighttracks.py @@ -4,10 +4,12 @@ import json import random import re +import time from .common import InfoExtractor from ..utils import ( compat_str, + ExtractorError, ) @@ -112,14 +114,30 @@ session = str(random.randint(0, 1000000000)) mix_id = data['id'] track_count = data['tracks_count'] + duration = data['duration'] + avg_song_duration = duration / track_count first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id) next_url = first_url entries = [] + for i in range(track_count): - api_json = self._download_webpage( - next_url, playlist_id, - note='Downloading song information %d/%d' % (i + 1, track_count), - errnote='Failed to download song information') + + api_json = None + download_tries = 0 + + while api_json is None: + try: + api_json = self._download_webpage( + next_url, playlist_id, + note='Downloading song information %d/%d' % (i + 1, track_count), + errnote='Failed to download song information') + except ExtractorError: + if download_tries > 3: + raise + else: + download_tries += 1 + time.sleep(avg_song_duration) + api_data = json.loads(api_json) track_data = api_data['set']['track'] info = { @@ -131,6 +149,7 @@ 'ext': 'm4a', } entries.append(info) + next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % ( session, mix_id, track_data['id']) return {
{"golden_diff": "diff --git a/youtube_dl/extractor/eighttracks.py b/youtube_dl/extractor/eighttracks.py\n--- a/youtube_dl/extractor/eighttracks.py\n+++ b/youtube_dl/extractor/eighttracks.py\n@@ -4,10 +4,12 @@\n import json\n import random\n import re\n+import time\n \n from .common import InfoExtractor\n from ..utils import (\n compat_str,\n+ ExtractorError,\n )\n \n \n@@ -112,14 +114,30 @@\n session = str(random.randint(0, 1000000000))\n mix_id = data['id']\n track_count = data['tracks_count']\n+ duration = data['duration']\n+ avg_song_duration = duration / track_count\n first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id)\n next_url = first_url\n entries = []\n+\n for i in range(track_count):\n- api_json = self._download_webpage(\n- next_url, playlist_id,\n- note='Downloading song information %d/%d' % (i + 1, track_count),\n- errnote='Failed to download song information')\n+ \n+ api_json = None\n+ download_tries = 0\n+\n+ while api_json is None:\n+ try:\n+ api_json = self._download_webpage(\n+ next_url, playlist_id,\n+ note='Downloading song information %d/%d' % (i + 1, track_count),\n+ errnote='Failed to download song information')\n+ except ExtractorError:\n+ if download_tries > 3:\n+ raise\n+ else:\n+ download_tries += 1\n+ time.sleep(avg_song_duration)\n+\n api_data = json.loads(api_json)\n track_data = api_data['set']['track']\n info = {\n@@ -131,6 +149,7 @@\n 'ext': 'm4a',\n }\n entries.append(info)\n+\n next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (\n session, mix_id, track_data['id'])\n return {\n", "issue": "[8tracks] throws 403 error after first 4 songs. Nothing downloads.\n8tracks recently updated their API to version 3. They probably updated their security on this (because, according to their license, you can only skip 3 songs within a certain time frame).\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport json\nimport random\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n compat_str,\n)\n\n\nclass EightTracksIE(InfoExtractor):\n IE_NAME = '8tracks'\n _VALID_URL = r'https?://8tracks\\.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$'\n _TEST = {\n \"name\": \"EightTracks\",\n \"url\": \"http://8tracks.com/ytdl/youtube-dl-test-tracks-a\",\n \"info_dict\": {\n 'id': '1336550',\n 'display_id': 'youtube-dl-test-tracks-a',\n \"description\": \"test chars: \\\"'/\\\\\u00e4\u21ad\",\n \"title\": \"youtube-dl test tracks \\\"'/\\\\\u00e4\u21ad<>\",\n },\n \"playlist\": [\n {\n \"md5\": \"96ce57f24389fc8734ce47f4c1abcc55\",\n \"info_dict\": {\n \"id\": \"11885610\",\n \"ext\": \"m4a\",\n \"title\": \"youtue-dl project<>\\\"' - youtube-dl test track 1 \\\"'/\\\\\\u00e4\\u21ad\",\n \"uploader_id\": \"ytdl\"\n }\n },\n {\n \"md5\": \"4ab26f05c1f7291ea460a3920be8021f\",\n \"info_dict\": {\n \"id\": \"11885608\",\n \"ext\": \"m4a\",\n \"title\": \"youtube-dl project - youtube-dl test track 2 \\\"'/\\\\\\u00e4\\u21ad\",\n \"uploader_id\": \"ytdl\"\n }\n },\n {\n \"md5\": \"d30b5b5f74217410f4689605c35d1fd7\",\n \"info_dict\": {\n \"id\": \"11885679\",\n \"ext\": \"m4a\",\n \"title\": \"youtube-dl project as well - youtube-dl test track 3 \\\"'/\\\\\\u00e4\\u21ad\",\n \"uploader_id\": \"ytdl\"\n }\n },\n {\n \"md5\": \"4eb0a669317cd725f6bbd336a29f923a\",\n \"info_dict\": {\n \"id\": \"11885680\",\n \"ext\": \"m4a\",\n \"title\": \"youtube-dl project as well - youtube-dl test track 4 \\\"'/\\\\\\u00e4\\u21ad\",\n \"uploader_id\": \"ytdl\"\n }\n },\n {\n \"md5\": \"1893e872e263a2705558d1d319ad19e8\",\n \"info_dict\": {\n \"id\": \"11885682\",\n \"ext\": \"m4a\",\n \"title\": \"PH - youtube-dl test track 5 \\\"'/\\\\\\u00e4\\u21ad\",\n \"uploader_id\": \"ytdl\"\n }\n },\n {\n \"md5\": \"b673c46f47a216ab1741ae8836af5899\",\n \"info_dict\": {\n \"id\": \"11885683\",\n \"ext\": \"m4a\",\n \"title\": \"PH - youtube-dl test track 6 \\\"'/\\\\\\u00e4\\u21ad\",\n \"uploader_id\": \"ytdl\"\n }\n },\n {\n \"md5\": \"1d74534e95df54986da7f5abf7d842b7\",\n \"info_dict\": {\n \"id\": \"11885684\",\n \"ext\": \"m4a\",\n \"title\": \"phihag - youtube-dl test track 7 \\\"'/\\\\\\u00e4\\u21ad\",\n \"uploader_id\": \"ytdl\"\n }\n },\n {\n \"md5\": \"f081f47af8f6ae782ed131d38b9cd1c0\",\n \"info_dict\": {\n \"id\": \"11885685\",\n \"ext\": \"m4a\",\n \"title\": \"phihag - youtube-dl test track 8 \\\"'/\\\\\\u00e4\\u21ad\",\n \"uploader_id\": \"ytdl\"\n }\n }\n ]\n }\n\n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n playlist_id = mobj.group('id')\n\n webpage = self._download_webpage(url, playlist_id)\n\n json_like = self._search_regex(\n r\"(?s)PAGE.mix = (.*?);\\n\", webpage, 'trax information')\n data = json.loads(json_like)\n\n session = str(random.randint(0, 1000000000))\n mix_id = data['id']\n track_count = data['tracks_count']\n first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id)\n next_url = first_url\n entries = []\n for i in range(track_count):\n api_json = self._download_webpage(\n next_url, playlist_id,\n note='Downloading song information %d/%d' % (i + 1, track_count),\n errnote='Failed to download song information')\n api_data = json.loads(api_json)\n track_data = api_data['set']['track']\n info = {\n 'id': compat_str(track_data['id']),\n 'url': track_data['track_file_stream_url'],\n 'title': track_data['performer'] + u' - ' + track_data['name'],\n 'raw_title': track_data['name'],\n 'uploader_id': data['user']['login'],\n 'ext': 'm4a',\n }\n entries.append(info)\n next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (\n session, mix_id, track_data['id'])\n return {\n '_type': 'playlist',\n 'entries': entries,\n 'id': compat_str(mix_id),\n 'display_id': playlist_id,\n 'title': data.get('name'),\n 'description': data.get('description'),\n }\n", "path": "youtube_dl/extractor/eighttracks.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport json\nimport random\nimport re\nimport time\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n compat_str,\n ExtractorError,\n)\n\n\nclass EightTracksIE(InfoExtractor):\n IE_NAME = '8tracks'\n _VALID_URL = r'https?://8tracks\\.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$'\n _TEST = {\n \"name\": \"EightTracks\",\n \"url\": \"http://8tracks.com/ytdl/youtube-dl-test-tracks-a\",\n \"info_dict\": {\n 'id': '1336550',\n 'display_id': 'youtube-dl-test-tracks-a',\n \"description\": \"test chars: \\\"'/\\\\\u00e4\u21ad\",\n \"title\": \"youtube-dl test tracks \\\"'/\\\\\u00e4\u21ad<>\",\n },\n \"playlist\": [\n {\n \"md5\": \"96ce57f24389fc8734ce47f4c1abcc55\",\n \"info_dict\": {\n \"id\": \"11885610\",\n \"ext\": \"m4a\",\n \"title\": \"youtue-dl project<>\\\"' - youtube-dl test track 1 \\\"'/\\\\\\u00e4\\u21ad\",\n \"uploader_id\": \"ytdl\"\n }\n },\n {\n \"md5\": \"4ab26f05c1f7291ea460a3920be8021f\",\n \"info_dict\": {\n \"id\": \"11885608\",\n \"ext\": \"m4a\",\n \"title\": \"youtube-dl project - youtube-dl test track 2 \\\"'/\\\\\\u00e4\\u21ad\",\n \"uploader_id\": \"ytdl\"\n }\n },\n {\n \"md5\": \"d30b5b5f74217410f4689605c35d1fd7\",\n \"info_dict\": {\n \"id\": \"11885679\",\n \"ext\": \"m4a\",\n \"title\": \"youtube-dl project as well - youtube-dl test track 3 \\\"'/\\\\\\u00e4\\u21ad\",\n \"uploader_id\": \"ytdl\"\n }\n },\n {\n \"md5\": \"4eb0a669317cd725f6bbd336a29f923a\",\n \"info_dict\": {\n \"id\": \"11885680\",\n \"ext\": \"m4a\",\n \"title\": \"youtube-dl project as well - youtube-dl test track 4 \\\"'/\\\\\\u00e4\\u21ad\",\n \"uploader_id\": \"ytdl\"\n }\n },\n {\n \"md5\": \"1893e872e263a2705558d1d319ad19e8\",\n \"info_dict\": {\n \"id\": \"11885682\",\n \"ext\": \"m4a\",\n \"title\": \"PH - youtube-dl test track 5 \\\"'/\\\\\\u00e4\\u21ad\",\n \"uploader_id\": \"ytdl\"\n }\n },\n {\n \"md5\": \"b673c46f47a216ab1741ae8836af5899\",\n \"info_dict\": {\n \"id\": \"11885683\",\n \"ext\": \"m4a\",\n \"title\": \"PH - youtube-dl test track 6 \\\"'/\\\\\\u00e4\\u21ad\",\n \"uploader_id\": \"ytdl\"\n }\n },\n {\n \"md5\": \"1d74534e95df54986da7f5abf7d842b7\",\n \"info_dict\": {\n \"id\": \"11885684\",\n \"ext\": \"m4a\",\n \"title\": \"phihag - youtube-dl test track 7 \\\"'/\\\\\\u00e4\\u21ad\",\n \"uploader_id\": \"ytdl\"\n }\n },\n {\n \"md5\": \"f081f47af8f6ae782ed131d38b9cd1c0\",\n \"info_dict\": {\n \"id\": \"11885685\",\n \"ext\": \"m4a\",\n \"title\": \"phihag - youtube-dl test track 8 \\\"'/\\\\\\u00e4\\u21ad\",\n \"uploader_id\": \"ytdl\"\n }\n }\n ]\n }\n\n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n playlist_id = mobj.group('id')\n\n webpage = self._download_webpage(url, playlist_id)\n\n json_like = self._search_regex(\n r\"(?s)PAGE.mix = (.*?);\\n\", webpage, 'trax information')\n data = json.loads(json_like)\n\n session = str(random.randint(0, 1000000000))\n mix_id = data['id']\n track_count = data['tracks_count']\n duration = data['duration']\n avg_song_duration = duration / track_count\n first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id)\n next_url = first_url\n entries = []\n\n for i in range(track_count):\n \n api_json = None\n download_tries = 0\n\n while api_json is None:\n try:\n api_json = self._download_webpage(\n next_url, playlist_id,\n note='Downloading song information %d/%d' % (i + 1, track_count),\n errnote='Failed to download song information')\n except ExtractorError:\n if download_tries > 3:\n raise\n else:\n download_tries += 1\n time.sleep(avg_song_duration)\n\n api_data = json.loads(api_json)\n track_data = api_data['set']['track']\n info = {\n 'id': compat_str(track_data['id']),\n 'url': track_data['track_file_stream_url'],\n 'title': track_data['performer'] + u' - ' + track_data['name'],\n 'raw_title': track_data['name'],\n 'uploader_id': data['user']['login'],\n 'ext': 'm4a',\n }\n entries.append(info)\n\n next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (\n session, mix_id, track_data['id'])\n return {\n '_type': 'playlist',\n 'entries': entries,\n 'id': compat_str(mix_id),\n 'display_id': playlist_id,\n 'title': data.get('name'),\n 'description': data.get('description'),\n }\n", "path": "youtube_dl/extractor/eighttracks.py"}]}
2,179
514
gh_patches_debug_38459
rasdani/github-patches
git_diff
kornia__kornia-2235
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Use more pythonic expressions in rgb to hls Make improvements in the `kornia/color/hls.py` module to use more pythonic expressions. Ex: ```diff - r = torch.select(im, -3, 0) + r = im[…, 0, :,:] ``` _Originally posted by @edgarriba in https://github.com/kornia/kornia/pull/2030#discussion_r1032065189_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kornia/color/hls.py` Content: ``` 1 import math 2 from typing import Tuple 3 4 import torch 5 6 from kornia.core import Module, Tensor, stack, tensor, where 7 8 9 def rgb_to_hls(image: Tensor, eps: float = 1e-8) -> Tensor: 10 r"""Convert a RGB image to HLS. 11 12 .. image:: _static/img/rgb_to_hls.png 13 14 The image data is assumed to be in the range of (0, 1). 15 16 NOTE: this method cannot be compiled with JIT in pytohrch < 1.7.0 17 18 Args: 19 image: RGB image to be converted to HLS with shape :math:`(*, 3, H, W)`. 20 eps: epsilon value to avoid div by zero. 21 22 Returns: 23 HLS version of the image with shape :math:`(*, 3, H, W)`. 24 25 Example: 26 >>> input = torch.rand(2, 3, 4, 5) 27 >>> output = rgb_to_hls(input) # 2x3x4x5 28 """ 29 if not isinstance(image, Tensor): 30 raise TypeError(f"Input type is not a Tensor. Got {type(image)}") 31 32 if len(image.shape) < 3 or image.shape[-3] != 3: 33 raise ValueError(f"Input size must have a shape of (*, 3, H, W). Got {image.shape}") 34 35 _RGB2HSL_IDX = tensor([[[0.0]], [[1.0]], [[2.0]]], device=image.device, dtype=image.dtype) # 3x1x1 36 37 _img_max: Tuple[Tensor, Tensor] = image.max(-3) 38 maxc = _img_max[0] 39 imax = _img_max[1] 40 minc: Tensor = image.min(-3)[0] 41 42 if image.requires_grad: 43 l_ = maxc + minc 44 s = maxc - minc 45 # weird behaviour with undefined vars in JIT... 46 # scripting requires image_hls be defined even if it is not used :S 47 h = l_ # assign to any tensor... 48 image_hls = l_ # assign to any tensor... 49 else: 50 # define the resulting image to avoid the torch.stack([h, l, s]) 51 # so, h, l and s require inplace operations 52 # NOTE: stack() increases in a 10% the cost in colab 53 image_hls = torch.empty_like(image) 54 h = torch.select(image_hls, -3, 0) 55 l_ = torch.select(image_hls, -3, 1) 56 s = torch.select(image_hls, -3, 2) 57 torch.add(maxc, minc, out=l_) # l = max + min 58 torch.sub(maxc, minc, out=s) # s = max - min 59 60 # precompute image / (max - min) 61 im = image / (s + eps).unsqueeze(-3) 62 63 # epsilon cannot be inside the torch.where to avoid precision issues 64 s /= where(l_ < 1.0, l_, 2.0 - l_) + eps # saturation 65 l_ /= 2 # luminance 66 67 # note that r,g and b were previously div by (max - min) 68 r = torch.select(im, -3, 0) 69 g = torch.select(im, -3, 1) 70 b = torch.select(im, -3, 2) 71 # h[imax == 0] = (((g - b) / (max - min)) % 6)[imax == 0] 72 # h[imax == 1] = (((b - r) / (max - min)) + 2)[imax == 1] 73 # h[imax == 2] = (((r - g) / (max - min)) + 4)[imax == 2] 74 cond = imax.unsqueeze(-3) == _RGB2HSL_IDX 75 if image.requires_grad: 76 h = torch.mul((g - b) % 6, torch.select(cond, -3, 0)) 77 else: 78 torch.mul((g - b).remainder(6), torch.select(cond, -3, 0), out=h) 79 h += torch.add(b - r, 2) * torch.select(cond, -3, 1) 80 h += torch.add(r - g, 4) * torch.select(cond, -3, 2) 81 # h = 2.0 * math.pi * (60.0 * h) / 360.0 82 h *= math.pi / 3.0 # hue [0, 2*pi] 83 84 if image.requires_grad: 85 return stack([h, l_, s], -3) 86 return image_hls 87 88 89 def hls_to_rgb(image: Tensor) -> Tensor: 90 r"""Convert a HLS image to RGB. 91 92 The image data is assumed to be in the range of (0, 1). 93 94 Args: 95 image: HLS image to be converted to RGB with shape :math:`(*, 3, H, W)`. 96 97 Returns: 98 RGB version of the image with shape :math:`(*, 3, H, W)`. 99 100 Example: 101 >>> input = torch.rand(2, 3, 4, 5) 102 >>> output = hls_to_rgb(input) # 2x3x4x5 103 """ 104 if not isinstance(image, Tensor): 105 raise TypeError(f"Input type is not a Tensor. Got {type(image)}") 106 107 if len(image.shape) < 3 or image.shape[-3] != 3: 108 raise ValueError(f"Input size must have a shape of (*, 3, H, W). Got {image.shape}") 109 110 _HLS2RGB = tensor([[[0.0]], [[8.0]], [[4.0]]], device=image.device, dtype=image.dtype) # 3x1x1 111 112 im: Tensor = image.unsqueeze(-4) 113 h: Tensor = torch.select(im, -3, 0) 114 l: Tensor = torch.select(im, -3, 1) 115 s: Tensor = torch.select(im, -3, 2) 116 h = h * (6 / math.pi) # h * 360 / (2 * math.pi) / 30 117 a = s * torch.min(l, 1.0 - l) 118 119 # kr = (0 + h) % 12 120 # kg = (8 + h) % 12 121 # kb = (4 + h) % 12 122 k: Tensor = (h + _HLS2RGB) % 12 123 124 # l - a * max(min(min(k - 3.0, 9.0 - k), 1), -1) 125 mink = torch.min(k - 3.0, 9.0 - k) 126 return torch.addcmul(l, a, mink.clamp_(min=-1.0, max=1.0), value=-1) 127 128 129 class RgbToHls(Module): 130 r"""Convert an image from RGB to HLS. 131 132 The image data is assumed to be in the range of (0, 1). 133 134 Returns: 135 HLS version of the image. 136 137 Shape: 138 - image: :math:`(*, 3, H, W)` 139 - output: :math:`(*, 3, H, W)` 140 141 Examples: 142 >>> input = torch.rand(2, 3, 4, 5) 143 >>> hls = RgbToHls() 144 >>> output = hls(input) # 2x3x4x5 145 """ 146 147 def forward(self, image: Tensor) -> Tensor: 148 return rgb_to_hls(image) 149 150 151 class HlsToRgb(Module): 152 r"""Convert an image from HLS to RGB. 153 154 The image data is assumed to be in the range of (0, 1). 155 156 Returns: 157 RGB version of the image. 158 159 Shape: 160 - input: :math:`(*, 3, H, W)` 161 - output: :math:`(*, 3, H, W)` 162 163 Reference: 164 https://en.wikipedia.org/wiki/HSL_and_HSV 165 166 Examples: 167 >>> input = torch.rand(2, 3, 4, 5) 168 >>> rgb = HlsToRgb() 169 >>> output = rgb(input) # 2x3x4x5 170 """ 171 172 def forward(self, image: Tensor) -> Tensor: 173 return hls_to_rgb(image) 174 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kornia/color/hls.py b/kornia/color/hls.py --- a/kornia/color/hls.py +++ b/kornia/color/hls.py @@ -7,7 +7,7 @@ def rgb_to_hls(image: Tensor, eps: float = 1e-8) -> Tensor: - r"""Convert a RGB image to HLS. + r"""Convert an RGB image to HLS. .. image:: _static/img/rgb_to_hls.png @@ -51,9 +51,7 @@ # so, h, l and s require inplace operations # NOTE: stack() increases in a 10% the cost in colab image_hls = torch.empty_like(image) - h = torch.select(image_hls, -3, 0) - l_ = torch.select(image_hls, -3, 1) - s = torch.select(image_hls, -3, 2) + h, l_, s = image_hls[..., 0, :, :], image_hls[..., 1, :, :], image_hls[..., 2, :, :] torch.add(maxc, minc, out=l_) # l = max + min torch.sub(maxc, minc, out=s) # s = max - min @@ -65,19 +63,18 @@ l_ /= 2 # luminance # note that r,g and b were previously div by (max - min) - r = torch.select(im, -3, 0) - g = torch.select(im, -3, 1) - b = torch.select(im, -3, 2) + r, g, b = im[..., 0, :, :], im[..., 1, :, :], im[..., 2, :, :] # h[imax == 0] = (((g - b) / (max - min)) % 6)[imax == 0] # h[imax == 1] = (((b - r) / (max - min)) + 2)[imax == 1] # h[imax == 2] = (((r - g) / (max - min)) + 4)[imax == 2] - cond = imax.unsqueeze(-3) == _RGB2HSL_IDX + cond = imax[..., None, :, :] == _RGB2HSL_IDX if image.requires_grad: - h = torch.mul((g - b) % 6, torch.select(cond, -3, 0)) + h = ((g - b) % 6) * cond[..., 0, :, :] else: - torch.mul((g - b).remainder(6), torch.select(cond, -3, 0), out=h) - h += torch.add(b - r, 2) * torch.select(cond, -3, 1) - h += torch.add(r - g, 4) * torch.select(cond, -3, 2) + # replacing `torch.mul` with `out=h` with python * operator gives wrong results + torch.mul((g - b) % 6, cond[..., 0, :, :], out=h) + h += (b - r + 2) * cond[..., 1, :, :] + h += (r - g + 4) * cond[..., 2, :, :] # h = 2.0 * math.pi * (60.0 * h) / 360.0 h *= math.pi / 3.0 # hue [0, 2*pi] @@ -110,9 +107,9 @@ _HLS2RGB = tensor([[[0.0]], [[8.0]], [[4.0]]], device=image.device, dtype=image.dtype) # 3x1x1 im: Tensor = image.unsqueeze(-4) - h: Tensor = torch.select(im, -3, 0) - l: Tensor = torch.select(im, -3, 1) - s: Tensor = torch.select(im, -3, 2) + h: Tensor = im[..., 0, :, :] + l: Tensor = im[..., 1, :, :] + s: Tensor = im[..., 2, :, :] h = h * (6 / math.pi) # h * 360 / (2 * math.pi) / 30 a = s * torch.min(l, 1.0 - l)
{"golden_diff": "diff --git a/kornia/color/hls.py b/kornia/color/hls.py\n--- a/kornia/color/hls.py\n+++ b/kornia/color/hls.py\n@@ -7,7 +7,7 @@\n \n \n def rgb_to_hls(image: Tensor, eps: float = 1e-8) -> Tensor:\n- r\"\"\"Convert a RGB image to HLS.\n+ r\"\"\"Convert an RGB image to HLS.\n \n .. image:: _static/img/rgb_to_hls.png\n \n@@ -51,9 +51,7 @@\n # so, h, l and s require inplace operations\n # NOTE: stack() increases in a 10% the cost in colab\n image_hls = torch.empty_like(image)\n- h = torch.select(image_hls, -3, 0)\n- l_ = torch.select(image_hls, -3, 1)\n- s = torch.select(image_hls, -3, 2)\n+ h, l_, s = image_hls[..., 0, :, :], image_hls[..., 1, :, :], image_hls[..., 2, :, :]\n torch.add(maxc, minc, out=l_) # l = max + min\n torch.sub(maxc, minc, out=s) # s = max - min\n \n@@ -65,19 +63,18 @@\n l_ /= 2 # luminance\n \n # note that r,g and b were previously div by (max - min)\n- r = torch.select(im, -3, 0)\n- g = torch.select(im, -3, 1)\n- b = torch.select(im, -3, 2)\n+ r, g, b = im[..., 0, :, :], im[..., 1, :, :], im[..., 2, :, :]\n # h[imax == 0] = (((g - b) / (max - min)) % 6)[imax == 0]\n # h[imax == 1] = (((b - r) / (max - min)) + 2)[imax == 1]\n # h[imax == 2] = (((r - g) / (max - min)) + 4)[imax == 2]\n- cond = imax.unsqueeze(-3) == _RGB2HSL_IDX\n+ cond = imax[..., None, :, :] == _RGB2HSL_IDX\n if image.requires_grad:\n- h = torch.mul((g - b) % 6, torch.select(cond, -3, 0))\n+ h = ((g - b) % 6) * cond[..., 0, :, :]\n else:\n- torch.mul((g - b).remainder(6), torch.select(cond, -3, 0), out=h)\n- h += torch.add(b - r, 2) * torch.select(cond, -3, 1)\n- h += torch.add(r - g, 4) * torch.select(cond, -3, 2)\n+ # replacing `torch.mul` with `out=h` with python * operator gives wrong results\n+ torch.mul((g - b) % 6, cond[..., 0, :, :], out=h)\n+ h += (b - r + 2) * cond[..., 1, :, :]\n+ h += (r - g + 4) * cond[..., 2, :, :]\n # h = 2.0 * math.pi * (60.0 * h) / 360.0\n h *= math.pi / 3.0 # hue [0, 2*pi]\n \n@@ -110,9 +107,9 @@\n _HLS2RGB = tensor([[[0.0]], [[8.0]], [[4.0]]], device=image.device, dtype=image.dtype) # 3x1x1\n \n im: Tensor = image.unsqueeze(-4)\n- h: Tensor = torch.select(im, -3, 0)\n- l: Tensor = torch.select(im, -3, 1)\n- s: Tensor = torch.select(im, -3, 2)\n+ h: Tensor = im[..., 0, :, :]\n+ l: Tensor = im[..., 1, :, :]\n+ s: Tensor = im[..., 2, :, :]\n h = h * (6 / math.pi) # h * 360 / (2 * math.pi) / 30\n a = s * torch.min(l, 1.0 - l)\n", "issue": "Use more pythonic expressions in rgb to hls\nMake improvements in the `kornia/color/hls.py` module to use more pythonic expressions. Ex:\r\n```diff\r\n- r = torch.select(im, -3, 0)\r\n+ r = im[\u2026, 0, :,:]\r\n```\r\n\r\n\r\n_Originally posted by @edgarriba in https://github.com/kornia/kornia/pull/2030#discussion_r1032065189_\r\n \n", "before_files": [{"content": "import math\nfrom typing import Tuple\n\nimport torch\n\nfrom kornia.core import Module, Tensor, stack, tensor, where\n\n\ndef rgb_to_hls(image: Tensor, eps: float = 1e-8) -> Tensor:\n r\"\"\"Convert a RGB image to HLS.\n\n .. image:: _static/img/rgb_to_hls.png\n\n The image data is assumed to be in the range of (0, 1).\n\n NOTE: this method cannot be compiled with JIT in pytohrch < 1.7.0\n\n Args:\n image: RGB image to be converted to HLS with shape :math:`(*, 3, H, W)`.\n eps: epsilon value to avoid div by zero.\n\n Returns:\n HLS version of the image with shape :math:`(*, 3, H, W)`.\n\n Example:\n >>> input = torch.rand(2, 3, 4, 5)\n >>> output = rgb_to_hls(input) # 2x3x4x5\n \"\"\"\n if not isinstance(image, Tensor):\n raise TypeError(f\"Input type is not a Tensor. Got {type(image)}\")\n\n if len(image.shape) < 3 or image.shape[-3] != 3:\n raise ValueError(f\"Input size must have a shape of (*, 3, H, W). Got {image.shape}\")\n\n _RGB2HSL_IDX = tensor([[[0.0]], [[1.0]], [[2.0]]], device=image.device, dtype=image.dtype) # 3x1x1\n\n _img_max: Tuple[Tensor, Tensor] = image.max(-3)\n maxc = _img_max[0]\n imax = _img_max[1]\n minc: Tensor = image.min(-3)[0]\n\n if image.requires_grad:\n l_ = maxc + minc\n s = maxc - minc\n # weird behaviour with undefined vars in JIT...\n # scripting requires image_hls be defined even if it is not used :S\n h = l_ # assign to any tensor...\n image_hls = l_ # assign to any tensor...\n else:\n # define the resulting image to avoid the torch.stack([h, l, s])\n # so, h, l and s require inplace operations\n # NOTE: stack() increases in a 10% the cost in colab\n image_hls = torch.empty_like(image)\n h = torch.select(image_hls, -3, 0)\n l_ = torch.select(image_hls, -3, 1)\n s = torch.select(image_hls, -3, 2)\n torch.add(maxc, minc, out=l_) # l = max + min\n torch.sub(maxc, minc, out=s) # s = max - min\n\n # precompute image / (max - min)\n im = image / (s + eps).unsqueeze(-3)\n\n # epsilon cannot be inside the torch.where to avoid precision issues\n s /= where(l_ < 1.0, l_, 2.0 - l_) + eps # saturation\n l_ /= 2 # luminance\n\n # note that r,g and b were previously div by (max - min)\n r = torch.select(im, -3, 0)\n g = torch.select(im, -3, 1)\n b = torch.select(im, -3, 2)\n # h[imax == 0] = (((g - b) / (max - min)) % 6)[imax == 0]\n # h[imax == 1] = (((b - r) / (max - min)) + 2)[imax == 1]\n # h[imax == 2] = (((r - g) / (max - min)) + 4)[imax == 2]\n cond = imax.unsqueeze(-3) == _RGB2HSL_IDX\n if image.requires_grad:\n h = torch.mul((g - b) % 6, torch.select(cond, -3, 0))\n else:\n torch.mul((g - b).remainder(6), torch.select(cond, -3, 0), out=h)\n h += torch.add(b - r, 2) * torch.select(cond, -3, 1)\n h += torch.add(r - g, 4) * torch.select(cond, -3, 2)\n # h = 2.0 * math.pi * (60.0 * h) / 360.0\n h *= math.pi / 3.0 # hue [0, 2*pi]\n\n if image.requires_grad:\n return stack([h, l_, s], -3)\n return image_hls\n\n\ndef hls_to_rgb(image: Tensor) -> Tensor:\n r\"\"\"Convert a HLS image to RGB.\n\n The image data is assumed to be in the range of (0, 1).\n\n Args:\n image: HLS image to be converted to RGB with shape :math:`(*, 3, H, W)`.\n\n Returns:\n RGB version of the image with shape :math:`(*, 3, H, W)`.\n\n Example:\n >>> input = torch.rand(2, 3, 4, 5)\n >>> output = hls_to_rgb(input) # 2x3x4x5\n \"\"\"\n if not isinstance(image, Tensor):\n raise TypeError(f\"Input type is not a Tensor. Got {type(image)}\")\n\n if len(image.shape) < 3 or image.shape[-3] != 3:\n raise ValueError(f\"Input size must have a shape of (*, 3, H, W). Got {image.shape}\")\n\n _HLS2RGB = tensor([[[0.0]], [[8.0]], [[4.0]]], device=image.device, dtype=image.dtype) # 3x1x1\n\n im: Tensor = image.unsqueeze(-4)\n h: Tensor = torch.select(im, -3, 0)\n l: Tensor = torch.select(im, -3, 1)\n s: Tensor = torch.select(im, -3, 2)\n h = h * (6 / math.pi) # h * 360 / (2 * math.pi) / 30\n a = s * torch.min(l, 1.0 - l)\n\n # kr = (0 + h) % 12\n # kg = (8 + h) % 12\n # kb = (4 + h) % 12\n k: Tensor = (h + _HLS2RGB) % 12\n\n # l - a * max(min(min(k - 3.0, 9.0 - k), 1), -1)\n mink = torch.min(k - 3.0, 9.0 - k)\n return torch.addcmul(l, a, mink.clamp_(min=-1.0, max=1.0), value=-1)\n\n\nclass RgbToHls(Module):\n r\"\"\"Convert an image from RGB to HLS.\n\n The image data is assumed to be in the range of (0, 1).\n\n Returns:\n HLS version of the image.\n\n Shape:\n - image: :math:`(*, 3, H, W)`\n - output: :math:`(*, 3, H, W)`\n\n Examples:\n >>> input = torch.rand(2, 3, 4, 5)\n >>> hls = RgbToHls()\n >>> output = hls(input) # 2x3x4x5\n \"\"\"\n\n def forward(self, image: Tensor) -> Tensor:\n return rgb_to_hls(image)\n\n\nclass HlsToRgb(Module):\n r\"\"\"Convert an image from HLS to RGB.\n\n The image data is assumed to be in the range of (0, 1).\n\n Returns:\n RGB version of the image.\n\n Shape:\n - input: :math:`(*, 3, H, W)`\n - output: :math:`(*, 3, H, W)`\n\n Reference:\n https://en.wikipedia.org/wiki/HSL_and_HSV\n\n Examples:\n >>> input = torch.rand(2, 3, 4, 5)\n >>> rgb = HlsToRgb()\n >>> output = rgb(input) # 2x3x4x5\n \"\"\"\n\n def forward(self, image: Tensor) -> Tensor:\n return hls_to_rgb(image)\n", "path": "kornia/color/hls.py"}], "after_files": [{"content": "import math\nfrom typing import Tuple\n\nimport torch\n\nfrom kornia.core import Module, Tensor, stack, tensor, where\n\n\ndef rgb_to_hls(image: Tensor, eps: float = 1e-8) -> Tensor:\n r\"\"\"Convert an RGB image to HLS.\n\n .. image:: _static/img/rgb_to_hls.png\n\n The image data is assumed to be in the range of (0, 1).\n\n NOTE: this method cannot be compiled with JIT in pytohrch < 1.7.0\n\n Args:\n image: RGB image to be converted to HLS with shape :math:`(*, 3, H, W)`.\n eps: epsilon value to avoid div by zero.\n\n Returns:\n HLS version of the image with shape :math:`(*, 3, H, W)`.\n\n Example:\n >>> input = torch.rand(2, 3, 4, 5)\n >>> output = rgb_to_hls(input) # 2x3x4x5\n \"\"\"\n if not isinstance(image, Tensor):\n raise TypeError(f\"Input type is not a Tensor. Got {type(image)}\")\n\n if len(image.shape) < 3 or image.shape[-3] != 3:\n raise ValueError(f\"Input size must have a shape of (*, 3, H, W). Got {image.shape}\")\n\n _RGB2HSL_IDX = tensor([[[0.0]], [[1.0]], [[2.0]]], device=image.device, dtype=image.dtype) # 3x1x1\n\n _img_max: Tuple[Tensor, Tensor] = image.max(-3)\n maxc = _img_max[0]\n imax = _img_max[1]\n minc: Tensor = image.min(-3)[0]\n\n if image.requires_grad:\n l_ = maxc + minc\n s = maxc - minc\n # weird behaviour with undefined vars in JIT...\n # scripting requires image_hls be defined even if it is not used :S\n h = l_ # assign to any tensor...\n image_hls = l_ # assign to any tensor...\n else:\n # define the resulting image to avoid the torch.stack([h, l, s])\n # so, h, l and s require inplace operations\n # NOTE: stack() increases in a 10% the cost in colab\n image_hls = torch.empty_like(image)\n h, l_, s = image_hls[..., 0, :, :], image_hls[..., 1, :, :], image_hls[..., 2, :, :]\n torch.add(maxc, minc, out=l_) # l = max + min\n torch.sub(maxc, minc, out=s) # s = max - min\n\n # precompute image / (max - min)\n im = image / (s + eps).unsqueeze(-3)\n\n # epsilon cannot be inside the torch.where to avoid precision issues\n s /= where(l_ < 1.0, l_, 2.0 - l_) + eps # saturation\n l_ /= 2 # luminance\n\n # note that r,g and b were previously div by (max - min)\n r, g, b = im[..., 0, :, :], im[..., 1, :, :], im[..., 2, :, :]\n # h[imax == 0] = (((g - b) / (max - min)) % 6)[imax == 0]\n # h[imax == 1] = (((b - r) / (max - min)) + 2)[imax == 1]\n # h[imax == 2] = (((r - g) / (max - min)) + 4)[imax == 2]\n cond = imax[..., None, :, :] == _RGB2HSL_IDX\n if image.requires_grad:\n h = ((g - b) % 6) * cond[..., 0, :, :]\n else:\n # replacing `torch.mul` with `out=h` with python * operator gives wrong results\n torch.mul((g - b) % 6, cond[..., 0, :, :], out=h)\n h += (b - r + 2) * cond[..., 1, :, :]\n h += (r - g + 4) * cond[..., 2, :, :]\n # h = 2.0 * math.pi * (60.0 * h) / 360.0\n h *= math.pi / 3.0 # hue [0, 2*pi]\n\n if image.requires_grad:\n return stack([h, l_, s], -3)\n return image_hls\n\n\ndef hls_to_rgb(image: Tensor) -> Tensor:\n r\"\"\"Convert a HLS image to RGB.\n\n The image data is assumed to be in the range of (0, 1).\n\n Args:\n image: HLS image to be converted to RGB with shape :math:`(*, 3, H, W)`.\n\n Returns:\n RGB version of the image with shape :math:`(*, 3, H, W)`.\n\n Example:\n >>> input = torch.rand(2, 3, 4, 5)\n >>> output = hls_to_rgb(input) # 2x3x4x5\n \"\"\"\n if not isinstance(image, Tensor):\n raise TypeError(f\"Input type is not a Tensor. Got {type(image)}\")\n\n if len(image.shape) < 3 or image.shape[-3] != 3:\n raise ValueError(f\"Input size must have a shape of (*, 3, H, W). Got {image.shape}\")\n\n _HLS2RGB = tensor([[[0.0]], [[8.0]], [[4.0]]], device=image.device, dtype=image.dtype) # 3x1x1\n\n im: Tensor = image.unsqueeze(-4)\n h: Tensor = im[..., 0, :, :]\n l: Tensor = im[..., 1, :, :]\n s: Tensor = im[..., 2, :, :]\n h = h * (6 / math.pi) # h * 360 / (2 * math.pi) / 30\n a = s * torch.min(l, 1.0 - l)\n\n # kr = (0 + h) % 12\n # kg = (8 + h) % 12\n # kb = (4 + h) % 12\n k: Tensor = (h + _HLS2RGB) % 12\n\n # l - a * max(min(min(k - 3.0, 9.0 - k), 1), -1)\n mink = torch.min(k - 3.0, 9.0 - k)\n return torch.addcmul(l, a, mink.clamp_(min=-1.0, max=1.0), value=-1)\n\n\nclass RgbToHls(Module):\n r\"\"\"Convert an image from RGB to HLS.\n\n The image data is assumed to be in the range of (0, 1).\n\n Returns:\n HLS version of the image.\n\n Shape:\n - image: :math:`(*, 3, H, W)`\n - output: :math:`(*, 3, H, W)`\n\n Examples:\n >>> input = torch.rand(2, 3, 4, 5)\n >>> hls = RgbToHls()\n >>> output = hls(input) # 2x3x4x5\n \"\"\"\n\n def forward(self, image: Tensor) -> Tensor:\n return rgb_to_hls(image)\n\n\nclass HlsToRgb(Module):\n r\"\"\"Convert an image from HLS to RGB.\n\n The image data is assumed to be in the range of (0, 1).\n\n Returns:\n RGB version of the image.\n\n Shape:\n - input: :math:`(*, 3, H, W)`\n - output: :math:`(*, 3, H, W)`\n\n Reference:\n https://en.wikipedia.org/wiki/HSL_and_HSV\n\n Examples:\n >>> input = torch.rand(2, 3, 4, 5)\n >>> rgb = HlsToRgb()\n >>> output = rgb(input) # 2x3x4x5\n \"\"\"\n\n def forward(self, image: Tensor) -> Tensor:\n return hls_to_rgb(image)\n", "path": "kornia/color/hls.py"}]}
2,670
990
gh_patches_debug_36666
rasdani/github-patches
git_diff
arviz-devs__arviz-1263
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- HPD plot shows values over each other Hey All, I am trying to figure out how to change the plot_posterior function to show the actual values. In one of my plots, where it is small, I get numbers writing over each other. Any help/advice? ![image](https://user-images.githubusercontent.com/605973/85463990-c71ec780-b56c-11ea-95f6-3706e6ef72a1.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `arviz/plots/backends/matplotlib/posteriorplot.py` Content: ``` 1 """Matplotlib Plot posterior densities.""" 2 from typing import Optional 3 from numbers import Number 4 import matplotlib.pyplot as plt 5 import numpy as np 6 7 from . import backend_show 8 from ....stats import hdi 9 from ...kdeplot import plot_kde 10 from ...plot_utils import ( 11 make_label, 12 _create_axes_grid, 13 format_sig_figs, 14 round_num, 15 calculate_point_estimate, 16 ) 17 from ....numeric_utils import get_bins 18 19 20 def plot_posterior( 21 ax, 22 length_plotters, 23 rows, 24 cols, 25 figsize, 26 plotters, 27 bw, 28 bins, 29 kind, 30 point_estimate, 31 round_to, 32 hdi_prob, 33 multimodal, 34 ref_val, 35 rope, 36 ax_labelsize, 37 xt_labelsize, 38 kwargs, 39 titlesize, 40 backend_kwargs, 41 show, 42 ): 43 """Matplotlib posterior plot.""" 44 if ax is None: 45 _, ax = _create_axes_grid( 46 length_plotters, 47 rows, 48 cols, 49 figsize=figsize, 50 squeeze=False, 51 backend_kwargs=backend_kwargs, 52 ) 53 idx = 0 54 for (var_name, selection, x), ax_ in zip(plotters, np.ravel(ax)): 55 _plot_posterior_op( 56 idx, 57 x.flatten(), 58 var_name, 59 selection, 60 ax=ax_, 61 bw=bw, 62 bins=bins, 63 kind=kind, 64 point_estimate=point_estimate, 65 round_to=round_to, 66 hdi_prob=hdi_prob, 67 multimodal=multimodal, 68 ref_val=ref_val, 69 rope=rope, 70 ax_labelsize=ax_labelsize, 71 xt_labelsize=xt_labelsize, 72 **kwargs 73 ) 74 idx += 1 75 ax_.set_title(make_label(var_name, selection), fontsize=titlesize, wrap=True) 76 77 if backend_show(show): 78 plt.show() 79 80 return ax 81 82 83 def _plot_posterior_op( 84 idx, 85 values, 86 var_name, 87 selection, 88 ax, 89 bw, 90 linewidth, 91 bins, 92 kind, 93 point_estimate, 94 hdi_prob, 95 multimodal, 96 ref_val, 97 rope, 98 ax_labelsize, 99 xt_labelsize, 100 round_to: Optional[int] = None, 101 **kwargs 102 ): # noqa: D202 103 """Artist to draw posterior.""" 104 105 def format_as_percent(x, round_to=0): 106 return "{0:.{1:d}f}%".format(100 * x, round_to) 107 108 def display_ref_val(): 109 if ref_val is None: 110 return 111 elif isinstance(ref_val, dict): 112 val = None 113 for sel in ref_val.get(var_name, []): 114 if all( 115 k in selection and selection[k] == v for k, v in sel.items() if k != "ref_val" 116 ): 117 val = sel["ref_val"] 118 break 119 if val is None: 120 return 121 elif isinstance(ref_val, list): 122 val = ref_val[idx] 123 elif isinstance(ref_val, Number): 124 val = ref_val 125 else: 126 raise ValueError( 127 "Argument `ref_val` must be None, a constant, a list or a " 128 'dictionary like {"var_name": [{"ref_val": ref_val}]}' 129 ) 130 less_than_ref_probability = (values < val).mean() 131 greater_than_ref_probability = (values >= val).mean() 132 ref_in_posterior = "{} <{:g}< {}".format( 133 format_as_percent(less_than_ref_probability, 1), 134 val, 135 format_as_percent(greater_than_ref_probability, 1), 136 ) 137 ax.axvline(val, ymin=0.05, ymax=0.75, color="C1", lw=linewidth, alpha=0.65) 138 ax.text( 139 values.mean(), 140 plot_height * 0.6, 141 ref_in_posterior, 142 size=ax_labelsize, 143 color="C1", 144 weight="semibold", 145 horizontalalignment="center", 146 ) 147 148 def display_rope(): 149 if rope is None: 150 return 151 elif isinstance(rope, dict): 152 vals = None 153 for sel in rope.get(var_name, []): 154 # pylint: disable=line-too-long 155 if all(k in selection and selection[k] == v for k, v in sel.items() if k != "rope"): 156 vals = sel["rope"] 157 break 158 if vals is None: 159 return 160 elif len(rope) == 2: 161 vals = rope 162 else: 163 raise ValueError( 164 "Argument `rope` must be None, a dictionary like" 165 '{"var_name": {"rope": (lo, hi)}}, or an' 166 "iterable of length 2" 167 ) 168 169 ax.plot( 170 vals, 171 (plot_height * 0.02, plot_height * 0.02), 172 lw=linewidth * 5, 173 color="C2", 174 solid_capstyle="butt", 175 zorder=0, 176 alpha=0.7, 177 ) 178 text_props = {"size": ax_labelsize, "horizontalalignment": "center", "color": "C2"} 179 ax.text(vals[0], plot_height * 0.2, vals[0], weight="semibold", **text_props) 180 ax.text(vals[1], plot_height * 0.2, vals[1], weight="semibold", **text_props) 181 182 def display_point_estimate(): 183 if not point_estimate: 184 return 185 point_value = calculate_point_estimate(point_estimate, values, bw) 186 sig_figs = format_sig_figs(point_value, round_to) 187 point_text = "{point_estimate}={point_value:.{sig_figs}g}".format( 188 point_estimate=point_estimate, point_value=point_value, sig_figs=sig_figs 189 ) 190 ax.text( 191 point_value, 192 plot_height * 0.8, 193 point_text, 194 size=ax_labelsize, 195 horizontalalignment="center", 196 ) 197 198 def display_hdi(): 199 # np.ndarray with 2 entries, min and max 200 # pylint: disable=line-too-long 201 hdi_probs = hdi(values, hdi_prob=hdi_prob, multimodal=multimodal) # type: np.ndarray 202 203 for hdi_i in np.atleast_2d(hdi_probs): 204 ax.plot( 205 hdi_i, 206 (plot_height * 0.02, plot_height * 0.02), 207 lw=linewidth * 2, 208 color="k", 209 solid_capstyle="butt", 210 ) 211 ax.text( 212 hdi_i[0], 213 plot_height * 0.07, 214 round_num(hdi_i[0], round_to), 215 size=ax_labelsize, 216 horizontalalignment="center", 217 ) 218 ax.text( 219 hdi_i[1], 220 plot_height * 0.07, 221 round_num(hdi_i[1], round_to), 222 size=ax_labelsize, 223 horizontalalignment="center", 224 ) 225 ax.text( 226 (hdi_i[0] + hdi_i[1]) / 2, 227 plot_height * 0.3, 228 format_as_percent(hdi_prob) + " HDI", 229 size=ax_labelsize, 230 horizontalalignment="center", 231 ) 232 233 def format_axes(): 234 ax.yaxis.set_ticks([]) 235 ax.spines["top"].set_visible(False) 236 ax.spines["right"].set_visible(False) 237 ax.spines["left"].set_visible(False) 238 ax.spines["bottom"].set_visible(True) 239 ax.xaxis.set_ticks_position("bottom") 240 ax.tick_params( 241 axis="x", direction="out", width=1, length=3, color="0.5", labelsize=xt_labelsize 242 ) 243 ax.spines["bottom"].set_color("0.5") 244 245 if kind == "kde" and values.dtype.kind == "f": 246 kwargs.setdefault("linewidth", linewidth) 247 plot_kde( 248 values, 249 bw=bw, 250 fill_kwargs={"alpha": kwargs.pop("fill_alpha", 0)}, 251 plot_kwargs=kwargs, 252 ax=ax, 253 rug=False, 254 show=False, 255 ) 256 else: 257 if bins is None: 258 if values.dtype.kind == "i": 259 xmin = values.min() 260 xmax = values.max() 261 bins = get_bins(values) 262 ax.set_xlim(xmin - 0.5, xmax + 0.5) 263 else: 264 bins = "auto" 265 kwargs.setdefault("align", "left") 266 kwargs.setdefault("color", "C0") 267 ax.hist(values, bins=bins, alpha=0.35, **kwargs) 268 269 plot_height = ax.get_ylim()[1] 270 271 format_axes() 272 if hdi_prob != "hide": 273 display_hdi() 274 display_point_estimate() 275 display_ref_val() 276 display_rope() 277 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/arviz/plots/backends/matplotlib/posteriorplot.py b/arviz/plots/backends/matplotlib/posteriorplot.py --- a/arviz/plots/backends/matplotlib/posteriorplot.py +++ b/arviz/plots/backends/matplotlib/posteriorplot.py @@ -69,7 +69,7 @@ rope=rope, ax_labelsize=ax_labelsize, xt_labelsize=xt_labelsize, - **kwargs + **kwargs, ) idx += 1 ax_.set_title(make_label(var_name, selection), fontsize=titlesize, wrap=True) @@ -98,7 +98,7 @@ ax_labelsize, xt_labelsize, round_to: Optional[int] = None, - **kwargs + **kwargs, ): # noqa: D202 """Artist to draw posterior.""" @@ -175,9 +175,23 @@ zorder=0, alpha=0.7, ) - text_props = {"size": ax_labelsize, "horizontalalignment": "center", "color": "C2"} - ax.text(vals[0], plot_height * 0.2, vals[0], weight="semibold", **text_props) - ax.text(vals[1], plot_height * 0.2, vals[1], weight="semibold", **text_props) + text_props = {"size": ax_labelsize, "color": "C2"} + ax.text( + vals[0], + plot_height * 0.2, + f"{vals[0]} ", + weight="semibold", + horizontalalignment="right", + **text_props, + ) + ax.text( + vals[1], + plot_height * 0.2, + f" {vals[1]}", + weight="semibold", + horizontalalignment="left", + **text_props, + ) def display_point_estimate(): if not point_estimate: @@ -211,16 +225,16 @@ ax.text( hdi_i[0], plot_height * 0.07, - round_num(hdi_i[0], round_to), + round_num(hdi_i[0], round_to) + " ", size=ax_labelsize, - horizontalalignment="center", + horizontalalignment="right", ) ax.text( hdi_i[1], plot_height * 0.07, - round_num(hdi_i[1], round_to), + " " + round_num(hdi_i[1], round_to), size=ax_labelsize, - horizontalalignment="center", + horizontalalignment="left", ) ax.text( (hdi_i[0] + hdi_i[1]) / 2,
{"golden_diff": "diff --git a/arviz/plots/backends/matplotlib/posteriorplot.py b/arviz/plots/backends/matplotlib/posteriorplot.py\n--- a/arviz/plots/backends/matplotlib/posteriorplot.py\n+++ b/arviz/plots/backends/matplotlib/posteriorplot.py\n@@ -69,7 +69,7 @@\n rope=rope,\n ax_labelsize=ax_labelsize,\n xt_labelsize=xt_labelsize,\n- **kwargs\n+ **kwargs,\n )\n idx += 1\n ax_.set_title(make_label(var_name, selection), fontsize=titlesize, wrap=True)\n@@ -98,7 +98,7 @@\n ax_labelsize,\n xt_labelsize,\n round_to: Optional[int] = None,\n- **kwargs\n+ **kwargs,\n ): # noqa: D202\n \"\"\"Artist to draw posterior.\"\"\"\n \n@@ -175,9 +175,23 @@\n zorder=0,\n alpha=0.7,\n )\n- text_props = {\"size\": ax_labelsize, \"horizontalalignment\": \"center\", \"color\": \"C2\"}\n- ax.text(vals[0], plot_height * 0.2, vals[0], weight=\"semibold\", **text_props)\n- ax.text(vals[1], plot_height * 0.2, vals[1], weight=\"semibold\", **text_props)\n+ text_props = {\"size\": ax_labelsize, \"color\": \"C2\"}\n+ ax.text(\n+ vals[0],\n+ plot_height * 0.2,\n+ f\"{vals[0]} \",\n+ weight=\"semibold\",\n+ horizontalalignment=\"right\",\n+ **text_props,\n+ )\n+ ax.text(\n+ vals[1],\n+ plot_height * 0.2,\n+ f\" {vals[1]}\",\n+ weight=\"semibold\",\n+ horizontalalignment=\"left\",\n+ **text_props,\n+ )\n \n def display_point_estimate():\n if not point_estimate:\n@@ -211,16 +225,16 @@\n ax.text(\n hdi_i[0],\n plot_height * 0.07,\n- round_num(hdi_i[0], round_to),\n+ round_num(hdi_i[0], round_to) + \" \",\n size=ax_labelsize,\n- horizontalalignment=\"center\",\n+ horizontalalignment=\"right\",\n )\n ax.text(\n hdi_i[1],\n plot_height * 0.07,\n- round_num(hdi_i[1], round_to),\n+ \" \" + round_num(hdi_i[1], round_to),\n size=ax_labelsize,\n- horizontalalignment=\"center\",\n+ horizontalalignment=\"left\",\n )\n ax.text(\n (hdi_i[0] + hdi_i[1]) / 2,\n", "issue": "HPD plot shows values over each other\nHey All,\r\n\r\nI am trying to figure out how to change the plot_posterior function to show the actual values. In one of my plots, where it is small, I get numbers writing over each other. Any help/advice?\r\n\r\n![image](https://user-images.githubusercontent.com/605973/85463990-c71ec780-b56c-11ea-95f6-3706e6ef72a1.png)\r\n\n", "before_files": [{"content": "\"\"\"Matplotlib Plot posterior densities.\"\"\"\nfrom typing import Optional\nfrom numbers import Number\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom . import backend_show\nfrom ....stats import hdi\nfrom ...kdeplot import plot_kde\nfrom ...plot_utils import (\n make_label,\n _create_axes_grid,\n format_sig_figs,\n round_num,\n calculate_point_estimate,\n)\nfrom ....numeric_utils import get_bins\n\n\ndef plot_posterior(\n ax,\n length_plotters,\n rows,\n cols,\n figsize,\n plotters,\n bw,\n bins,\n kind,\n point_estimate,\n round_to,\n hdi_prob,\n multimodal,\n ref_val,\n rope,\n ax_labelsize,\n xt_labelsize,\n kwargs,\n titlesize,\n backend_kwargs,\n show,\n):\n \"\"\"Matplotlib posterior plot.\"\"\"\n if ax is None:\n _, ax = _create_axes_grid(\n length_plotters,\n rows,\n cols,\n figsize=figsize,\n squeeze=False,\n backend_kwargs=backend_kwargs,\n )\n idx = 0\n for (var_name, selection, x), ax_ in zip(plotters, np.ravel(ax)):\n _plot_posterior_op(\n idx,\n x.flatten(),\n var_name,\n selection,\n ax=ax_,\n bw=bw,\n bins=bins,\n kind=kind,\n point_estimate=point_estimate,\n round_to=round_to,\n hdi_prob=hdi_prob,\n multimodal=multimodal,\n ref_val=ref_val,\n rope=rope,\n ax_labelsize=ax_labelsize,\n xt_labelsize=xt_labelsize,\n **kwargs\n )\n idx += 1\n ax_.set_title(make_label(var_name, selection), fontsize=titlesize, wrap=True)\n\n if backend_show(show):\n plt.show()\n\n return ax\n\n\ndef _plot_posterior_op(\n idx,\n values,\n var_name,\n selection,\n ax,\n bw,\n linewidth,\n bins,\n kind,\n point_estimate,\n hdi_prob,\n multimodal,\n ref_val,\n rope,\n ax_labelsize,\n xt_labelsize,\n round_to: Optional[int] = None,\n **kwargs\n): # noqa: D202\n \"\"\"Artist to draw posterior.\"\"\"\n\n def format_as_percent(x, round_to=0):\n return \"{0:.{1:d}f}%\".format(100 * x, round_to)\n\n def display_ref_val():\n if ref_val is None:\n return\n elif isinstance(ref_val, dict):\n val = None\n for sel in ref_val.get(var_name, []):\n if all(\n k in selection and selection[k] == v for k, v in sel.items() if k != \"ref_val\"\n ):\n val = sel[\"ref_val\"]\n break\n if val is None:\n return\n elif isinstance(ref_val, list):\n val = ref_val[idx]\n elif isinstance(ref_val, Number):\n val = ref_val\n else:\n raise ValueError(\n \"Argument `ref_val` must be None, a constant, a list or a \"\n 'dictionary like {\"var_name\": [{\"ref_val\": ref_val}]}'\n )\n less_than_ref_probability = (values < val).mean()\n greater_than_ref_probability = (values >= val).mean()\n ref_in_posterior = \"{} <{:g}< {}\".format(\n format_as_percent(less_than_ref_probability, 1),\n val,\n format_as_percent(greater_than_ref_probability, 1),\n )\n ax.axvline(val, ymin=0.05, ymax=0.75, color=\"C1\", lw=linewidth, alpha=0.65)\n ax.text(\n values.mean(),\n plot_height * 0.6,\n ref_in_posterior,\n size=ax_labelsize,\n color=\"C1\",\n weight=\"semibold\",\n horizontalalignment=\"center\",\n )\n\n def display_rope():\n if rope is None:\n return\n elif isinstance(rope, dict):\n vals = None\n for sel in rope.get(var_name, []):\n # pylint: disable=line-too-long\n if all(k in selection and selection[k] == v for k, v in sel.items() if k != \"rope\"):\n vals = sel[\"rope\"]\n break\n if vals is None:\n return\n elif len(rope) == 2:\n vals = rope\n else:\n raise ValueError(\n \"Argument `rope` must be None, a dictionary like\"\n '{\"var_name\": {\"rope\": (lo, hi)}}, or an'\n \"iterable of length 2\"\n )\n\n ax.plot(\n vals,\n (plot_height * 0.02, plot_height * 0.02),\n lw=linewidth * 5,\n color=\"C2\",\n solid_capstyle=\"butt\",\n zorder=0,\n alpha=0.7,\n )\n text_props = {\"size\": ax_labelsize, \"horizontalalignment\": \"center\", \"color\": \"C2\"}\n ax.text(vals[0], plot_height * 0.2, vals[0], weight=\"semibold\", **text_props)\n ax.text(vals[1], plot_height * 0.2, vals[1], weight=\"semibold\", **text_props)\n\n def display_point_estimate():\n if not point_estimate:\n return\n point_value = calculate_point_estimate(point_estimate, values, bw)\n sig_figs = format_sig_figs(point_value, round_to)\n point_text = \"{point_estimate}={point_value:.{sig_figs}g}\".format(\n point_estimate=point_estimate, point_value=point_value, sig_figs=sig_figs\n )\n ax.text(\n point_value,\n plot_height * 0.8,\n point_text,\n size=ax_labelsize,\n horizontalalignment=\"center\",\n )\n\n def display_hdi():\n # np.ndarray with 2 entries, min and max\n # pylint: disable=line-too-long\n hdi_probs = hdi(values, hdi_prob=hdi_prob, multimodal=multimodal) # type: np.ndarray\n\n for hdi_i in np.atleast_2d(hdi_probs):\n ax.plot(\n hdi_i,\n (plot_height * 0.02, plot_height * 0.02),\n lw=linewidth * 2,\n color=\"k\",\n solid_capstyle=\"butt\",\n )\n ax.text(\n hdi_i[0],\n plot_height * 0.07,\n round_num(hdi_i[0], round_to),\n size=ax_labelsize,\n horizontalalignment=\"center\",\n )\n ax.text(\n hdi_i[1],\n plot_height * 0.07,\n round_num(hdi_i[1], round_to),\n size=ax_labelsize,\n horizontalalignment=\"center\",\n )\n ax.text(\n (hdi_i[0] + hdi_i[1]) / 2,\n plot_height * 0.3,\n format_as_percent(hdi_prob) + \" HDI\",\n size=ax_labelsize,\n horizontalalignment=\"center\",\n )\n\n def format_axes():\n ax.yaxis.set_ticks([])\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"left\"].set_visible(False)\n ax.spines[\"bottom\"].set_visible(True)\n ax.xaxis.set_ticks_position(\"bottom\")\n ax.tick_params(\n axis=\"x\", direction=\"out\", width=1, length=3, color=\"0.5\", labelsize=xt_labelsize\n )\n ax.spines[\"bottom\"].set_color(\"0.5\")\n\n if kind == \"kde\" and values.dtype.kind == \"f\":\n kwargs.setdefault(\"linewidth\", linewidth)\n plot_kde(\n values,\n bw=bw,\n fill_kwargs={\"alpha\": kwargs.pop(\"fill_alpha\", 0)},\n plot_kwargs=kwargs,\n ax=ax,\n rug=False,\n show=False,\n )\n else:\n if bins is None:\n if values.dtype.kind == \"i\":\n xmin = values.min()\n xmax = values.max()\n bins = get_bins(values)\n ax.set_xlim(xmin - 0.5, xmax + 0.5)\n else:\n bins = \"auto\"\n kwargs.setdefault(\"align\", \"left\")\n kwargs.setdefault(\"color\", \"C0\")\n ax.hist(values, bins=bins, alpha=0.35, **kwargs)\n\n plot_height = ax.get_ylim()[1]\n\n format_axes()\n if hdi_prob != \"hide\":\n display_hdi()\n display_point_estimate()\n display_ref_val()\n display_rope()\n", "path": "arviz/plots/backends/matplotlib/posteriorplot.py"}], "after_files": [{"content": "\"\"\"Matplotlib Plot posterior densities.\"\"\"\nfrom typing import Optional\nfrom numbers import Number\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom . import backend_show\nfrom ....stats import hdi\nfrom ...kdeplot import plot_kde\nfrom ...plot_utils import (\n make_label,\n _create_axes_grid,\n format_sig_figs,\n round_num,\n calculate_point_estimate,\n)\nfrom ....numeric_utils import get_bins\n\n\ndef plot_posterior(\n ax,\n length_plotters,\n rows,\n cols,\n figsize,\n plotters,\n bw,\n bins,\n kind,\n point_estimate,\n round_to,\n hdi_prob,\n multimodal,\n ref_val,\n rope,\n ax_labelsize,\n xt_labelsize,\n kwargs,\n titlesize,\n backend_kwargs,\n show,\n):\n \"\"\"Matplotlib posterior plot.\"\"\"\n if ax is None:\n _, ax = _create_axes_grid(\n length_plotters,\n rows,\n cols,\n figsize=figsize,\n squeeze=False,\n backend_kwargs=backend_kwargs,\n )\n idx = 0\n for (var_name, selection, x), ax_ in zip(plotters, np.ravel(ax)):\n _plot_posterior_op(\n idx,\n x.flatten(),\n var_name,\n selection,\n ax=ax_,\n bw=bw,\n bins=bins,\n kind=kind,\n point_estimate=point_estimate,\n round_to=round_to,\n hdi_prob=hdi_prob,\n multimodal=multimodal,\n ref_val=ref_val,\n rope=rope,\n ax_labelsize=ax_labelsize,\n xt_labelsize=xt_labelsize,\n **kwargs,\n )\n idx += 1\n ax_.set_title(make_label(var_name, selection), fontsize=titlesize, wrap=True)\n\n if backend_show(show):\n plt.show()\n\n return ax\n\n\ndef _plot_posterior_op(\n idx,\n values,\n var_name,\n selection,\n ax,\n bw,\n linewidth,\n bins,\n kind,\n point_estimate,\n hdi_prob,\n multimodal,\n ref_val,\n rope,\n ax_labelsize,\n xt_labelsize,\n round_to: Optional[int] = None,\n **kwargs,\n): # noqa: D202\n \"\"\"Artist to draw posterior.\"\"\"\n\n def format_as_percent(x, round_to=0):\n return \"{0:.{1:d}f}%\".format(100 * x, round_to)\n\n def display_ref_val():\n if ref_val is None:\n return\n elif isinstance(ref_val, dict):\n val = None\n for sel in ref_val.get(var_name, []):\n if all(\n k in selection and selection[k] == v for k, v in sel.items() if k != \"ref_val\"\n ):\n val = sel[\"ref_val\"]\n break\n if val is None:\n return\n elif isinstance(ref_val, list):\n val = ref_val[idx]\n elif isinstance(ref_val, Number):\n val = ref_val\n else:\n raise ValueError(\n \"Argument `ref_val` must be None, a constant, a list or a \"\n 'dictionary like {\"var_name\": [{\"ref_val\": ref_val}]}'\n )\n less_than_ref_probability = (values < val).mean()\n greater_than_ref_probability = (values >= val).mean()\n ref_in_posterior = \"{} <{:g}< {}\".format(\n format_as_percent(less_than_ref_probability, 1),\n val,\n format_as_percent(greater_than_ref_probability, 1),\n )\n ax.axvline(val, ymin=0.05, ymax=0.75, color=\"C1\", lw=linewidth, alpha=0.65)\n ax.text(\n values.mean(),\n plot_height * 0.6,\n ref_in_posterior,\n size=ax_labelsize,\n color=\"C1\",\n weight=\"semibold\",\n horizontalalignment=\"center\",\n )\n\n def display_rope():\n if rope is None:\n return\n elif isinstance(rope, dict):\n vals = None\n for sel in rope.get(var_name, []):\n # pylint: disable=line-too-long\n if all(k in selection and selection[k] == v for k, v in sel.items() if k != \"rope\"):\n vals = sel[\"rope\"]\n break\n if vals is None:\n return\n elif len(rope) == 2:\n vals = rope\n else:\n raise ValueError(\n \"Argument `rope` must be None, a dictionary like\"\n '{\"var_name\": {\"rope\": (lo, hi)}}, or an'\n \"iterable of length 2\"\n )\n\n ax.plot(\n vals,\n (plot_height * 0.02, plot_height * 0.02),\n lw=linewidth * 5,\n color=\"C2\",\n solid_capstyle=\"butt\",\n zorder=0,\n alpha=0.7,\n )\n text_props = {\"size\": ax_labelsize, \"color\": \"C2\"}\n ax.text(\n vals[0],\n plot_height * 0.2,\n f\"{vals[0]} \",\n weight=\"semibold\",\n horizontalalignment=\"right\",\n **text_props,\n )\n ax.text(\n vals[1],\n plot_height * 0.2,\n f\" {vals[1]}\",\n weight=\"semibold\",\n horizontalalignment=\"left\",\n **text_props,\n )\n\n def display_point_estimate():\n if not point_estimate:\n return\n point_value = calculate_point_estimate(point_estimate, values, bw)\n sig_figs = format_sig_figs(point_value, round_to)\n point_text = \"{point_estimate}={point_value:.{sig_figs}g}\".format(\n point_estimate=point_estimate, point_value=point_value, sig_figs=sig_figs\n )\n ax.text(\n point_value,\n plot_height * 0.8,\n point_text,\n size=ax_labelsize,\n horizontalalignment=\"center\",\n )\n\n def display_hdi():\n # np.ndarray with 2 entries, min and max\n # pylint: disable=line-too-long\n hdi_probs = hdi(values, hdi_prob=hdi_prob, multimodal=multimodal) # type: np.ndarray\n\n for hdi_i in np.atleast_2d(hdi_probs):\n ax.plot(\n hdi_i,\n (plot_height * 0.02, plot_height * 0.02),\n lw=linewidth * 2,\n color=\"k\",\n solid_capstyle=\"butt\",\n )\n ax.text(\n hdi_i[0],\n plot_height * 0.07,\n round_num(hdi_i[0], round_to) + \" \",\n size=ax_labelsize,\n horizontalalignment=\"right\",\n )\n ax.text(\n hdi_i[1],\n plot_height * 0.07,\n \" \" + round_num(hdi_i[1], round_to),\n size=ax_labelsize,\n horizontalalignment=\"left\",\n )\n ax.text(\n (hdi_i[0] + hdi_i[1]) / 2,\n plot_height * 0.3,\n format_as_percent(hdi_prob) + \" HDI\",\n size=ax_labelsize,\n horizontalalignment=\"center\",\n )\n\n def format_axes():\n ax.yaxis.set_ticks([])\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"left\"].set_visible(False)\n ax.spines[\"bottom\"].set_visible(True)\n ax.xaxis.set_ticks_position(\"bottom\")\n ax.tick_params(\n axis=\"x\", direction=\"out\", width=1, length=3, color=\"0.5\", labelsize=xt_labelsize\n )\n ax.spines[\"bottom\"].set_color(\"0.5\")\n\n if kind == \"kde\" and values.dtype.kind == \"f\":\n kwargs.setdefault(\"linewidth\", linewidth)\n plot_kde(\n values,\n bw=bw,\n fill_kwargs={\"alpha\": kwargs.pop(\"fill_alpha\", 0)},\n plot_kwargs=kwargs,\n ax=ax,\n rug=False,\n show=False,\n )\n else:\n if bins is None:\n if values.dtype.kind == \"i\":\n xmin = values.min()\n xmax = values.max()\n bins = get_bins(values)\n ax.set_xlim(xmin - 0.5, xmax + 0.5)\n else:\n bins = \"auto\"\n kwargs.setdefault(\"align\", \"left\")\n kwargs.setdefault(\"color\", \"C0\")\n ax.hist(values, bins=bins, alpha=0.35, **kwargs)\n\n plot_height = ax.get_ylim()[1]\n\n format_axes()\n if hdi_prob != \"hide\":\n display_hdi()\n display_point_estimate()\n display_ref_val()\n display_rope()\n", "path": "arviz/plots/backends/matplotlib/posteriorplot.py"}]}
3,025
629
gh_patches_debug_13793
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-4723
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- login_required decorator is used at the end of the functions. In the file readthedocs.org/readthedocs/profiles/views.py, `login_required` decorator is used at the end of some functions and for some other functions, it used in the form of `@login_required`, which reduces the readability of the code. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `readthedocs/profiles/views.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 """Views for creating, editing and viewing site-specific user profiles.""" 3 4 from __future__ import ( 5 absolute_import, 6 division, 7 print_function, 8 unicode_literals, 9 ) 10 11 from django.contrib import messages 12 from django.contrib.auth import logout 13 from django.contrib.auth.decorators import login_required 14 from django.contrib.auth.models import User 15 from django.core.urlresolvers import reverse 16 from django.http import HttpResponseRedirect 17 from django.shortcuts import get_object_or_404, redirect, render 18 from django.utils.translation import ugettext_lazy as _ 19 20 from readthedocs.core.forms import UserAdvertisingForm, UserDeleteForm 21 22 23 def edit_profile( 24 request, form_class, success_url=None, 25 template_name='profiles/private/edit_profile.html', extra_context=None): 26 """ 27 Edit the current user's profile. 28 29 **Optional arguments:** 30 31 ``extra_context`` 32 A dictionary of variables to add to the template context. Any 33 callable object in this dictionary will be called to produce 34 the end result which appears in the context. 35 36 ``form_class`` 37 The form class to use for validating and editing the user 38 profile. This form class must operate similarly to a standard 39 Django ``ModelForm`` in that it must accept an instance of the 40 object to be edited as the keyword argument ``instance`` to 41 its constructor, and it must implement a method named 42 ``save()`` which will save the updates to the object. 43 44 ``success_url`` 45 The URL to redirect to following a successful edit. If not 46 specified, this will default to the URL of 47 :view:`profiles.views.profile_detail` for the profile object 48 being edited. 49 50 ``template_name`` 51 The template to use when displaying the profile-editing 52 form. If not specified, this will default to 53 :template:`profiles/edit_profile.html`. 54 55 **Context:** 56 57 ``form`` 58 The form for editing the profile. 59 60 ``profile`` 61 The user's current profile. 62 63 **Template:** 64 65 ``template_name`` keyword argument or 66 :template:`profiles/edit_profile.html`. 67 """ 68 profile_obj = request.user.profile 69 if success_url is None: 70 success_url = reverse( 71 'profiles_profile_detail', 72 kwargs={'username': request.user.username}) 73 if request.method == 'POST': 74 form = form_class( 75 data=request.POST, files=request.FILES, instance=profile_obj) 76 if form.is_valid(): 77 form.save() 78 return HttpResponseRedirect(success_url) 79 else: 80 form = form_class(instance=profile_obj) 81 82 if extra_context is None: 83 extra_context = {} 84 context = { 85 key: value() if callable(value) else value 86 for key, value in extra_context.items() 87 } 88 context.update({ 89 'form': form, 90 'profile': profile_obj, 91 'user': profile_obj.user, 92 }) 93 return render(request, template_name, context=context) 94 95 96 edit_profile = login_required(edit_profile) 97 98 99 @login_required() 100 def delete_account(request): 101 form = UserDeleteForm() 102 template_name = 'profiles/private/delete_account.html' 103 104 if request.method == 'POST': 105 form = UserDeleteForm(instance=request.user, data=request.POST) 106 if form.is_valid(): 107 # Delete the user permanently 108 # It will also delete some projects where the user is the only owner 109 request.user.delete() 110 logout(request) 111 messages.info(request, 'You have successfully deleted your account') 112 113 return redirect('homepage') 114 115 return render(request, template_name, {'form': form}) 116 117 118 def profile_detail( 119 request, username, public_profile_field=None, 120 template_name='profiles/public/profile_detail.html', 121 extra_context=None): 122 """ 123 Detail view of a user's profile. 124 125 If the user does not exists, ``Http404`` will be raised. 126 127 **Required arguments:** 128 129 ``username`` 130 The username of the user whose profile is being displayed. 131 132 **Optional arguments:** 133 134 ``extra_context`` 135 A dictionary of variables to add to the template context. Any 136 callable object in this dictionary will be called to produce 137 the end result which appears in the context. 138 139 ``public_profile_field`` 140 The name of a ``BooleanField`` on the profile model; if the 141 value of that field on the user's profile is ``False``, the 142 ``profile`` variable in the template will be ``None``. Use 143 this feature to allow users to mark their profiles as not 144 being publicly viewable. 145 146 If this argument is not specified, it will be assumed that all 147 users' profiles are publicly viewable. 148 149 ``template_name`` 150 The name of the template to use for displaying the profile. If 151 not specified, this will default to 152 :template:`profiles/profile_detail.html`. 153 154 **Context:** 155 156 ``profile`` 157 The user's profile, or ``None`` if the user's profile is not 158 publicly viewable (see the description of 159 ``public_profile_field`` above). 160 161 **Template:** 162 163 ``template_name`` keyword argument or 164 :template:`profiles/profile_detail.html`. 165 """ 166 user = get_object_or_404(User, username=username) 167 profile_obj = user.profile 168 if (public_profile_field is not None and 169 not getattr(profile_obj, public_profile_field)): 170 profile_obj = None 171 172 if extra_context is None: 173 extra_context = {} 174 context = { 175 key: value() if callable(value) else value 176 for key, value in extra_context.items() 177 } 178 context.update({'profile': profile_obj}) 179 return render(request, template_name, context=context) 180 181 182 @login_required 183 def account_advertising(request): 184 success_url = reverse(account_advertising) 185 profile_obj = request.user.profile 186 if request.method == 'POST': 187 form = UserAdvertisingForm( 188 data=request.POST, 189 instance=profile_obj, 190 ) 191 if form.is_valid(): 192 form.save() 193 messages.info(request, _('Updated your advertising preferences')) 194 return HttpResponseRedirect(success_url) 195 else: 196 form = UserAdvertisingForm(instance=profile_obj) 197 198 return render( 199 request, 200 'profiles/private/advertising_profile.html', 201 context={ 202 'form': form, 203 'profile': profile_obj, 204 'user': profile_obj.user, 205 }, 206 ) 207 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/readthedocs/profiles/views.py b/readthedocs/profiles/views.py --- a/readthedocs/profiles/views.py +++ b/readthedocs/profiles/views.py @@ -20,6 +20,7 @@ from readthedocs.core.forms import UserAdvertisingForm, UserDeleteForm +@login_required def edit_profile( request, form_class, success_url=None, template_name='profiles/private/edit_profile.html', extra_context=None): @@ -93,9 +94,6 @@ return render(request, template_name, context=context) -edit_profile = login_required(edit_profile) - - @login_required() def delete_account(request): form = UserDeleteForm()
{"golden_diff": "diff --git a/readthedocs/profiles/views.py b/readthedocs/profiles/views.py\n--- a/readthedocs/profiles/views.py\n+++ b/readthedocs/profiles/views.py\n@@ -20,6 +20,7 @@\n from readthedocs.core.forms import UserAdvertisingForm, UserDeleteForm\n \n \n+@login_required\n def edit_profile(\n request, form_class, success_url=None,\n template_name='profiles/private/edit_profile.html', extra_context=None):\n@@ -93,9 +94,6 @@\n return render(request, template_name, context=context)\n \n \n-edit_profile = login_required(edit_profile)\n-\n-\n @login_required()\n def delete_account(request):\n form = UserDeleteForm()\n", "issue": "login_required decorator is used at the end of the functions.\nIn the file readthedocs.org/readthedocs/profiles/views.py, `login_required` decorator is used at the end of some functions and for some other functions, it used in the form of `@login_required`, which reduces the readability of the code.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Views for creating, editing and viewing site-specific user profiles.\"\"\"\n\nfrom __future__ import (\n absolute_import,\n division,\n print_function,\n unicode_literals,\n)\n\nfrom django.contrib import messages\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom readthedocs.core.forms import UserAdvertisingForm, UserDeleteForm\n\n\ndef edit_profile(\n request, form_class, success_url=None,\n template_name='profiles/private/edit_profile.html', extra_context=None):\n \"\"\"\n Edit the current user's profile.\n\n **Optional arguments:**\n\n ``extra_context``\n A dictionary of variables to add to the template context. Any\n callable object in this dictionary will be called to produce\n the end result which appears in the context.\n\n ``form_class``\n The form class to use for validating and editing the user\n profile. This form class must operate similarly to a standard\n Django ``ModelForm`` in that it must accept an instance of the\n object to be edited as the keyword argument ``instance`` to\n its constructor, and it must implement a method named\n ``save()`` which will save the updates to the object.\n\n ``success_url``\n The URL to redirect to following a successful edit. If not\n specified, this will default to the URL of\n :view:`profiles.views.profile_detail` for the profile object\n being edited.\n\n ``template_name``\n The template to use when displaying the profile-editing\n form. If not specified, this will default to\n :template:`profiles/edit_profile.html`.\n\n **Context:**\n\n ``form``\n The form for editing the profile.\n\n ``profile``\n The user's current profile.\n\n **Template:**\n\n ``template_name`` keyword argument or\n :template:`profiles/edit_profile.html`.\n \"\"\"\n profile_obj = request.user.profile\n if success_url is None:\n success_url = reverse(\n 'profiles_profile_detail',\n kwargs={'username': request.user.username})\n if request.method == 'POST':\n form = form_class(\n data=request.POST, files=request.FILES, instance=profile_obj)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(success_url)\n else:\n form = form_class(instance=profile_obj)\n\n if extra_context is None:\n extra_context = {}\n context = {\n key: value() if callable(value) else value\n for key, value in extra_context.items()\n }\n context.update({\n 'form': form,\n 'profile': profile_obj,\n 'user': profile_obj.user,\n })\n return render(request, template_name, context=context)\n\n\nedit_profile = login_required(edit_profile)\n\n\n@login_required()\ndef delete_account(request):\n form = UserDeleteForm()\n template_name = 'profiles/private/delete_account.html'\n\n if request.method == 'POST':\n form = UserDeleteForm(instance=request.user, data=request.POST)\n if form.is_valid():\n # Delete the user permanently\n # It will also delete some projects where the user is the only owner\n request.user.delete()\n logout(request)\n messages.info(request, 'You have successfully deleted your account')\n\n return redirect('homepage')\n\n return render(request, template_name, {'form': form})\n\n\ndef profile_detail(\n request, username, public_profile_field=None,\n template_name='profiles/public/profile_detail.html',\n extra_context=None):\n \"\"\"\n Detail view of a user's profile.\n\n If the user does not exists, ``Http404`` will be raised.\n\n **Required arguments:**\n\n ``username``\n The username of the user whose profile is being displayed.\n\n **Optional arguments:**\n\n ``extra_context``\n A dictionary of variables to add to the template context. Any\n callable object in this dictionary will be called to produce\n the end result which appears in the context.\n\n ``public_profile_field``\n The name of a ``BooleanField`` on the profile model; if the\n value of that field on the user's profile is ``False``, the\n ``profile`` variable in the template will be ``None``. Use\n this feature to allow users to mark their profiles as not\n being publicly viewable.\n\n If this argument is not specified, it will be assumed that all\n users' profiles are publicly viewable.\n\n ``template_name``\n The name of the template to use for displaying the profile. If\n not specified, this will default to\n :template:`profiles/profile_detail.html`.\n\n **Context:**\n\n ``profile``\n The user's profile, or ``None`` if the user's profile is not\n publicly viewable (see the description of\n ``public_profile_field`` above).\n\n **Template:**\n\n ``template_name`` keyword argument or\n :template:`profiles/profile_detail.html`.\n \"\"\"\n user = get_object_or_404(User, username=username)\n profile_obj = user.profile\n if (public_profile_field is not None and\n not getattr(profile_obj, public_profile_field)):\n profile_obj = None\n\n if extra_context is None:\n extra_context = {}\n context = {\n key: value() if callable(value) else value\n for key, value in extra_context.items()\n }\n context.update({'profile': profile_obj})\n return render(request, template_name, context=context)\n\n\n@login_required\ndef account_advertising(request):\n success_url = reverse(account_advertising)\n profile_obj = request.user.profile\n if request.method == 'POST':\n form = UserAdvertisingForm(\n data=request.POST,\n instance=profile_obj,\n )\n if form.is_valid():\n form.save()\n messages.info(request, _('Updated your advertising preferences'))\n return HttpResponseRedirect(success_url)\n else:\n form = UserAdvertisingForm(instance=profile_obj)\n\n return render(\n request,\n 'profiles/private/advertising_profile.html',\n context={\n 'form': form,\n 'profile': profile_obj,\n 'user': profile_obj.user,\n },\n )\n", "path": "readthedocs/profiles/views.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Views for creating, editing and viewing site-specific user profiles.\"\"\"\n\nfrom __future__ import (\n absolute_import,\n division,\n print_function,\n unicode_literals,\n)\n\nfrom django.contrib import messages\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom readthedocs.core.forms import UserAdvertisingForm, UserDeleteForm\n\n\n@login_required\ndef edit_profile(\n request, form_class, success_url=None,\n template_name='profiles/private/edit_profile.html', extra_context=None):\n \"\"\"\n Edit the current user's profile.\n\n **Optional arguments:**\n\n ``extra_context``\n A dictionary of variables to add to the template context. Any\n callable object in this dictionary will be called to produce\n the end result which appears in the context.\n\n ``form_class``\n The form class to use for validating and editing the user\n profile. This form class must operate similarly to a standard\n Django ``ModelForm`` in that it must accept an instance of the\n object to be edited as the keyword argument ``instance`` to\n its constructor, and it must implement a method named\n ``save()`` which will save the updates to the object.\n\n ``success_url``\n The URL to redirect to following a successful edit. If not\n specified, this will default to the URL of\n :view:`profiles.views.profile_detail` for the profile object\n being edited.\n\n ``template_name``\n The template to use when displaying the profile-editing\n form. If not specified, this will default to\n :template:`profiles/edit_profile.html`.\n\n **Context:**\n\n ``form``\n The form for editing the profile.\n\n ``profile``\n The user's current profile.\n\n **Template:**\n\n ``template_name`` keyword argument or\n :template:`profiles/edit_profile.html`.\n \"\"\"\n profile_obj = request.user.profile\n if success_url is None:\n success_url = reverse(\n 'profiles_profile_detail',\n kwargs={'username': request.user.username})\n if request.method == 'POST':\n form = form_class(\n data=request.POST, files=request.FILES, instance=profile_obj)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(success_url)\n else:\n form = form_class(instance=profile_obj)\n\n if extra_context is None:\n extra_context = {}\n context = {\n key: value() if callable(value) else value\n for key, value in extra_context.items()\n }\n context.update({\n 'form': form,\n 'profile': profile_obj,\n 'user': profile_obj.user,\n })\n return render(request, template_name, context=context)\n\n\n@login_required()\ndef delete_account(request):\n form = UserDeleteForm()\n template_name = 'profiles/private/delete_account.html'\n\n if request.method == 'POST':\n form = UserDeleteForm(instance=request.user, data=request.POST)\n if form.is_valid():\n # Delete the user permanently\n # It will also delete some projects where the user is the only owner\n request.user.delete()\n logout(request)\n messages.info(request, 'You have successfully deleted your account')\n\n return redirect('homepage')\n\n return render(request, template_name, {'form': form})\n\n\ndef profile_detail(\n request, username, public_profile_field=None,\n template_name='profiles/public/profile_detail.html',\n extra_context=None):\n \"\"\"\n Detail view of a user's profile.\n\n If the user does not exists, ``Http404`` will be raised.\n\n **Required arguments:**\n\n ``username``\n The username of the user whose profile is being displayed.\n\n **Optional arguments:**\n\n ``extra_context``\n A dictionary of variables to add to the template context. Any\n callable object in this dictionary will be called to produce\n the end result which appears in the context.\n\n ``public_profile_field``\n The name of a ``BooleanField`` on the profile model; if the\n value of that field on the user's profile is ``False``, the\n ``profile`` variable in the template will be ``None``. Use\n this feature to allow users to mark their profiles as not\n being publicly viewable.\n\n If this argument is not specified, it will be assumed that all\n users' profiles are publicly viewable.\n\n ``template_name``\n The name of the template to use for displaying the profile. If\n not specified, this will default to\n :template:`profiles/profile_detail.html`.\n\n **Context:**\n\n ``profile``\n The user's profile, or ``None`` if the user's profile is not\n publicly viewable (see the description of\n ``public_profile_field`` above).\n\n **Template:**\n\n ``template_name`` keyword argument or\n :template:`profiles/profile_detail.html`.\n \"\"\"\n user = get_object_or_404(User, username=username)\n profile_obj = user.profile\n if (public_profile_field is not None and\n not getattr(profile_obj, public_profile_field)):\n profile_obj = None\n\n if extra_context is None:\n extra_context = {}\n context = {\n key: value() if callable(value) else value\n for key, value in extra_context.items()\n }\n context.update({'profile': profile_obj})\n return render(request, template_name, context=context)\n\n\n@login_required\ndef account_advertising(request):\n success_url = reverse(account_advertising)\n profile_obj = request.user.profile\n if request.method == 'POST':\n form = UserAdvertisingForm(\n data=request.POST,\n instance=profile_obj,\n )\n if form.is_valid():\n form.save()\n messages.info(request, _('Updated your advertising preferences'))\n return HttpResponseRedirect(success_url)\n else:\n form = UserAdvertisingForm(instance=profile_obj)\n\n return render(\n request,\n 'profiles/private/advertising_profile.html',\n context={\n 'form': form,\n 'profile': profile_obj,\n 'user': profile_obj.user,\n },\n )\n", "path": "readthedocs/profiles/views.py"}]}
2,200
151
gh_patches_debug_20222
rasdani/github-patches
git_diff
mitmproxy__mitmproxy-3211
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- proxyauth option breaks flow state access in addons ##### Steps to reproduce the problem: 1. Create `test.py` addon ```py from mitmproxy import ctx class AuthTest: def response(self, flow): ctx.log.info('Getting flow state') flow.get_state() ctx.log.info('Got flow state') addons = [ AuthTest() ] ``` 2. Start `mitmdump`, navigate to example.com and observe the log ```bash mitmdump -s server/test.py Loading script server/test.py Proxy server listening at http://*:8080 ... [::1]:56410: GET http://example.com/ << 200 OK 606b Getting flow state Got flow state ``` 3. Re-start `mitmdump` with proxyauth applied and navigate to example.com again ```bash mitmdump -s server/test.py --set proxyauth=foo:bar Loading script server/test.py Proxy server listening at http://*:8080 ... [::1]:56290: GET http://example.com/ << 200 OK 606b Getting flow state Addon error: Traceback (most recent call last): File "server/test.py", line 5, in response flow.get_state() File "/usr/local/Cellar/mitmproxy/4.0.1/libexec/lib/python3.6/site-packages/mitmproxy/flow.py", line 94, in get_state d = super().get_state() File "/usr/local/Cellar/mitmproxy/4.0.1/libexec/lib/python3.6/site-packages/mitmproxy/stateobject.py", line 31, in get_state state[attr] = get_state(cls, val) File "/usr/local/Cellar/mitmproxy/4.0.1/libexec/lib/python3.6/site-packages/mitmproxy/stateobject.py", line 94, in get_state return _process(typeinfo, val, False) File "/usr/local/Cellar/mitmproxy/4.0.1/libexec/lib/python3.6/site-packages/mitmproxy/stateobject.py", line 77, in _process for k, v in val.items() File "/usr/local/Cellar/mitmproxy/4.0.1/libexec/lib/python3.6/site-packages/mitmproxy/stateobject.py", line 77, in <dictcomp> for k, v in val.items() File "/usr/local/Cellar/mitmproxy/4.0.1/libexec/lib/python3.6/site-packages/mitmproxy/stateobject.py", line 81, in _process assert isinstance(val, (int, str, bool, bytes)) AssertionError ``` ##### Any other comments? What have you tried so far? There's a FIXME [right near the code that's breaking](https://github.com/mitmproxy/mitmproxy/blob/master/mitmproxy/stateobject.py#L80), but I think it's a red herring. My best guess is that proxy auth adds something to the flow state that isn't in the list of allowed types `int, str, bool, bytes`—possibly a `dict`. ##### System information ```bash mitmdump --version Mitmproxy: 4.0.1 Python: 3.6.5 OpenSSL: OpenSSL 1.0.2o 27 Mar 2018 Platform: Darwin-17.5.0-x86_64-i386-64bit ``` <!-- Please use the mitmproxy forums (https://discourse.mitmproxy.org/) for support/how-to questions. Thanks! :) --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mitmproxy/stateobject.py` Content: ``` 1 import typing 2 from typing import Any # noqa 3 from typing import MutableMapping # noqa 4 5 from mitmproxy.coretypes import serializable 6 from mitmproxy.utils import typecheck 7 8 9 class StateObject(serializable.Serializable): 10 """ 11 An object with serializable state. 12 13 State attributes can either be serializable types(str, tuple, bool, ...) 14 or StateObject instances themselves. 15 """ 16 17 _stateobject_attributes: MutableMapping[str, Any] = None 18 """ 19 An attribute-name -> class-or-type dict containing all attributes that 20 should be serialized. If the attribute is a class, it must implement the 21 Serializable protocol. 22 """ 23 24 def get_state(self): 25 """ 26 Retrieve object state. 27 """ 28 state = {} 29 for attr, cls in self._stateobject_attributes.items(): 30 val = getattr(self, attr) 31 state[attr] = get_state(cls, val) 32 return state 33 34 def set_state(self, state): 35 """ 36 Load object state from data returned by a get_state call. 37 """ 38 state = state.copy() 39 for attr, cls in self._stateobject_attributes.items(): 40 val = state.pop(attr) 41 if val is None: 42 setattr(self, attr, val) 43 else: 44 curr = getattr(self, attr) 45 if hasattr(curr, "set_state"): 46 curr.set_state(val) 47 else: 48 setattr(self, attr, make_object(cls, val)) 49 if state: 50 raise RuntimeWarning("Unexpected State in __setstate__: {}".format(state)) 51 52 53 def _process(typeinfo: typecheck.Type, val: typing.Any, make: bool) -> typing.Any: 54 if val is None: 55 return None 56 elif make and hasattr(typeinfo, "from_state"): 57 return typeinfo.from_state(val) 58 elif not make and hasattr(val, "get_state"): 59 return val.get_state() 60 61 typename = str(typeinfo) 62 63 if typename.startswith("typing.List"): 64 T = typecheck.sequence_type(typeinfo) 65 return [_process(T, x, make) for x in val] 66 elif typename.startswith("typing.Tuple"): 67 Ts = typecheck.tuple_types(typeinfo) 68 if len(Ts) != len(val): 69 raise ValueError("Invalid data. Expected {}, got {}.".format(Ts, val)) 70 return tuple( 71 _process(T, x, make) for T, x in zip(Ts, val) 72 ) 73 elif typename.startswith("typing.Dict"): 74 k_cls, v_cls = typecheck.mapping_types(typeinfo) 75 return { 76 _process(k_cls, k, make): _process(v_cls, v, make) 77 for k, v in val.items() 78 } 79 elif typename.startswith("typing.Any"): 80 # FIXME: Remove this when we remove flow.metadata 81 assert isinstance(val, (int, str, bool, bytes)) 82 return val 83 else: 84 return typeinfo(val) 85 86 87 def make_object(typeinfo: typecheck.Type, val: typing.Any) -> typing.Any: 88 """Create an object based on the state given in val.""" 89 return _process(typeinfo, val, True) 90 91 92 def get_state(typeinfo: typecheck.Type, val: typing.Any) -> typing.Any: 93 """Get the state of the object given as val.""" 94 return _process(typeinfo, val, False) 95 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mitmproxy/stateobject.py b/mitmproxy/stateobject.py --- a/mitmproxy/stateobject.py +++ b/mitmproxy/stateobject.py @@ -1,6 +1,7 @@ import typing from typing import Any # noqa from typing import MutableMapping # noqa +import json from mitmproxy.coretypes import serializable from mitmproxy.utils import typecheck @@ -77,8 +78,14 @@ for k, v in val.items() } elif typename.startswith("typing.Any"): - # FIXME: Remove this when we remove flow.metadata - assert isinstance(val, (int, str, bool, bytes)) + # This requires a bit of explanation. We can't import our IO layer here, + # because it causes a circular import. Rather than restructuring the + # code for this, we use JSON serialization, which has similar primitive + # type restrictions as tnetstring, to check for conformance. + try: + json.dumps(val) + except TypeError: + raise ValueError(f"Data not serializable: {val}") return val else: return typeinfo(val)
{"golden_diff": "diff --git a/mitmproxy/stateobject.py b/mitmproxy/stateobject.py\n--- a/mitmproxy/stateobject.py\n+++ b/mitmproxy/stateobject.py\n@@ -1,6 +1,7 @@\n import typing\n from typing import Any # noqa\n from typing import MutableMapping # noqa\n+import json\n \n from mitmproxy.coretypes import serializable\n from mitmproxy.utils import typecheck\n@@ -77,8 +78,14 @@\n for k, v in val.items()\n }\n elif typename.startswith(\"typing.Any\"):\n- # FIXME: Remove this when we remove flow.metadata\n- assert isinstance(val, (int, str, bool, bytes))\n+ # This requires a bit of explanation. We can't import our IO layer here,\n+ # because it causes a circular import. Rather than restructuring the\n+ # code for this, we use JSON serialization, which has similar primitive\n+ # type restrictions as tnetstring, to check for conformance.\n+ try:\n+ json.dumps(val)\n+ except TypeError:\n+ raise ValueError(f\"Data not serializable: {val}\")\n return val\n else:\n return typeinfo(val)\n", "issue": "proxyauth option breaks flow state access in addons\n##### Steps to reproduce the problem:\r\n\r\n1. Create `test.py` addon\r\n```py\r\nfrom mitmproxy import ctx\r\nclass AuthTest:\r\n def response(self, flow):\r\n ctx.log.info('Getting flow state')\r\n flow.get_state()\r\n ctx.log.info('Got flow state')\r\n\r\naddons = [\r\n AuthTest()\r\n]\r\n```\r\n2. Start `mitmdump`, navigate to example.com and observe the log\r\n```bash\r\nmitmdump -s server/test.py\r\nLoading script server/test.py\r\nProxy server listening at http://*:8080\r\n...\r\n[::1]:56410: GET http://example.com/\r\n << 200 OK 606b\r\nGetting flow state\r\nGot flow state\r\n```\r\n3. Re-start `mitmdump` with proxyauth applied and navigate to example.com again\r\n```bash\r\nmitmdump -s server/test.py --set proxyauth=foo:bar\r\nLoading script server/test.py\r\nProxy server listening at http://*:8080\r\n...\r\n[::1]:56290: GET http://example.com/\r\n << 200 OK 606b\r\nGetting flow state\r\nAddon error: Traceback (most recent call last):\r\n File \"server/test.py\", line 5, in response\r\n flow.get_state()\r\n File \"/usr/local/Cellar/mitmproxy/4.0.1/libexec/lib/python3.6/site-packages/mitmproxy/flow.py\", line 94, in get_state\r\n d = super().get_state()\r\n File \"/usr/local/Cellar/mitmproxy/4.0.1/libexec/lib/python3.6/site-packages/mitmproxy/stateobject.py\", line 31, in get_state\r\n state[attr] = get_state(cls, val)\r\n File \"/usr/local/Cellar/mitmproxy/4.0.1/libexec/lib/python3.6/site-packages/mitmproxy/stateobject.py\", line 94, in get_state\r\n return _process(typeinfo, val, False)\r\n File \"/usr/local/Cellar/mitmproxy/4.0.1/libexec/lib/python3.6/site-packages/mitmproxy/stateobject.py\", line 77, in _process\r\n for k, v in val.items()\r\n File \"/usr/local/Cellar/mitmproxy/4.0.1/libexec/lib/python3.6/site-packages/mitmproxy/stateobject.py\", line 77, in <dictcomp>\r\n for k, v in val.items()\r\n File \"/usr/local/Cellar/mitmproxy/4.0.1/libexec/lib/python3.6/site-packages/mitmproxy/stateobject.py\", line 81, in _process\r\n assert isinstance(val, (int, str, bool, bytes))\r\nAssertionError\r\n```\r\n\r\n##### Any other comments? What have you tried so far?\r\n\r\nThere's a FIXME [right near the code that's breaking](https://github.com/mitmproxy/mitmproxy/blob/master/mitmproxy/stateobject.py#L80), but I think it's a red herring. My best guess is that proxy auth adds something to the flow state that isn't in the list of allowed types `int, str, bool, bytes`\u2014possibly a `dict`.\r\n\r\n##### System information\r\n\r\n```bash\r\nmitmdump --version\r\nMitmproxy: 4.0.1\r\nPython: 3.6.5\r\nOpenSSL: OpenSSL 1.0.2o 27 Mar 2018\r\nPlatform: Darwin-17.5.0-x86_64-i386-64bit\r\n```\r\n\r\n<!-- Please use the mitmproxy forums (https://discourse.mitmproxy.org/) for support/how-to questions. Thanks! :) -->\r\n\n", "before_files": [{"content": "import typing\nfrom typing import Any # noqa\nfrom typing import MutableMapping # noqa\n\nfrom mitmproxy.coretypes import serializable\nfrom mitmproxy.utils import typecheck\n\n\nclass StateObject(serializable.Serializable):\n \"\"\"\n An object with serializable state.\n\n State attributes can either be serializable types(str, tuple, bool, ...)\n or StateObject instances themselves.\n \"\"\"\n\n _stateobject_attributes: MutableMapping[str, Any] = None\n \"\"\"\n An attribute-name -> class-or-type dict containing all attributes that\n should be serialized. If the attribute is a class, it must implement the\n Serializable protocol.\n \"\"\"\n\n def get_state(self):\n \"\"\"\n Retrieve object state.\n \"\"\"\n state = {}\n for attr, cls in self._stateobject_attributes.items():\n val = getattr(self, attr)\n state[attr] = get_state(cls, val)\n return state\n\n def set_state(self, state):\n \"\"\"\n Load object state from data returned by a get_state call.\n \"\"\"\n state = state.copy()\n for attr, cls in self._stateobject_attributes.items():\n val = state.pop(attr)\n if val is None:\n setattr(self, attr, val)\n else:\n curr = getattr(self, attr)\n if hasattr(curr, \"set_state\"):\n curr.set_state(val)\n else:\n setattr(self, attr, make_object(cls, val))\n if state:\n raise RuntimeWarning(\"Unexpected State in __setstate__: {}\".format(state))\n\n\ndef _process(typeinfo: typecheck.Type, val: typing.Any, make: bool) -> typing.Any:\n if val is None:\n return None\n elif make and hasattr(typeinfo, \"from_state\"):\n return typeinfo.from_state(val)\n elif not make and hasattr(val, \"get_state\"):\n return val.get_state()\n\n typename = str(typeinfo)\n\n if typename.startswith(\"typing.List\"):\n T = typecheck.sequence_type(typeinfo)\n return [_process(T, x, make) for x in val]\n elif typename.startswith(\"typing.Tuple\"):\n Ts = typecheck.tuple_types(typeinfo)\n if len(Ts) != len(val):\n raise ValueError(\"Invalid data. Expected {}, got {}.\".format(Ts, val))\n return tuple(\n _process(T, x, make) for T, x in zip(Ts, val)\n )\n elif typename.startswith(\"typing.Dict\"):\n k_cls, v_cls = typecheck.mapping_types(typeinfo)\n return {\n _process(k_cls, k, make): _process(v_cls, v, make)\n for k, v in val.items()\n }\n elif typename.startswith(\"typing.Any\"):\n # FIXME: Remove this when we remove flow.metadata\n assert isinstance(val, (int, str, bool, bytes))\n return val\n else:\n return typeinfo(val)\n\n\ndef make_object(typeinfo: typecheck.Type, val: typing.Any) -> typing.Any:\n \"\"\"Create an object based on the state given in val.\"\"\"\n return _process(typeinfo, val, True)\n\n\ndef get_state(typeinfo: typecheck.Type, val: typing.Any) -> typing.Any:\n \"\"\"Get the state of the object given as val.\"\"\"\n return _process(typeinfo, val, False)\n", "path": "mitmproxy/stateobject.py"}], "after_files": [{"content": "import typing\nfrom typing import Any # noqa\nfrom typing import MutableMapping # noqa\nimport json\n\nfrom mitmproxy.coretypes import serializable\nfrom mitmproxy.utils import typecheck\n\n\nclass StateObject(serializable.Serializable):\n \"\"\"\n An object with serializable state.\n\n State attributes can either be serializable types(str, tuple, bool, ...)\n or StateObject instances themselves.\n \"\"\"\n\n _stateobject_attributes: MutableMapping[str, Any] = None\n \"\"\"\n An attribute-name -> class-or-type dict containing all attributes that\n should be serialized. If the attribute is a class, it must implement the\n Serializable protocol.\n \"\"\"\n\n def get_state(self):\n \"\"\"\n Retrieve object state.\n \"\"\"\n state = {}\n for attr, cls in self._stateobject_attributes.items():\n val = getattr(self, attr)\n state[attr] = get_state(cls, val)\n return state\n\n def set_state(self, state):\n \"\"\"\n Load object state from data returned by a get_state call.\n \"\"\"\n state = state.copy()\n for attr, cls in self._stateobject_attributes.items():\n val = state.pop(attr)\n if val is None:\n setattr(self, attr, val)\n else:\n curr = getattr(self, attr)\n if hasattr(curr, \"set_state\"):\n curr.set_state(val)\n else:\n setattr(self, attr, make_object(cls, val))\n if state:\n raise RuntimeWarning(\"Unexpected State in __setstate__: {}\".format(state))\n\n\ndef _process(typeinfo: typecheck.Type, val: typing.Any, make: bool) -> typing.Any:\n if val is None:\n return None\n elif make and hasattr(typeinfo, \"from_state\"):\n return typeinfo.from_state(val)\n elif not make and hasattr(val, \"get_state\"):\n return val.get_state()\n\n typename = str(typeinfo)\n\n if typename.startswith(\"typing.List\"):\n T = typecheck.sequence_type(typeinfo)\n return [_process(T, x, make) for x in val]\n elif typename.startswith(\"typing.Tuple\"):\n Ts = typecheck.tuple_types(typeinfo)\n if len(Ts) != len(val):\n raise ValueError(\"Invalid data. Expected {}, got {}.\".format(Ts, val))\n return tuple(\n _process(T, x, make) for T, x in zip(Ts, val)\n )\n elif typename.startswith(\"typing.Dict\"):\n k_cls, v_cls = typecheck.mapping_types(typeinfo)\n return {\n _process(k_cls, k, make): _process(v_cls, v, make)\n for k, v in val.items()\n }\n elif typename.startswith(\"typing.Any\"):\n # This requires a bit of explanation. We can't import our IO layer here,\n # because it causes a circular import. Rather than restructuring the\n # code for this, we use JSON serialization, which has similar primitive\n # type restrictions as tnetstring, to check for conformance.\n try:\n json.dumps(val)\n except TypeError:\n raise ValueError(f\"Data not serializable: {val}\")\n return val\n else:\n return typeinfo(val)\n\n\ndef make_object(typeinfo: typecheck.Type, val: typing.Any) -> typing.Any:\n \"\"\"Create an object based on the state given in val.\"\"\"\n return _process(typeinfo, val, True)\n\n\ndef get_state(typeinfo: typecheck.Type, val: typing.Any) -> typing.Any:\n \"\"\"Get the state of the object given as val.\"\"\"\n return _process(typeinfo, val, False)\n", "path": "mitmproxy/stateobject.py"}]}
1,965
258
gh_patches_debug_50400
rasdani/github-patches
git_diff
freqtrade__freqtrade-2251
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Telegram Daily Is not Working on Freqtrade for coinbasepro ## Step 1: Have you search for this issue before posting it? Yes If you have discovered a bug in the bot, please [search our issue tracker](https://github.com/freqtrade/freqtrade/issues?q=is%3Aissue). If it hasn't been reported, please create a new issue. ## Step 2: Describe your environment * Operating system: _Ubuntu___ * Python Version: ___3.6__ (`python -V`) * CCXT version: __ccxt==1.17.542___ (`pip freeze | grep ccxt`) * Branch: Master | Develop * Last Commit ID: a0a9c9c21cdec0d50eb1597264414ed0e4aeb98a(`git log --format="%H" -n 1`) ## Step 3: Describe the problem: Running dry run mode, i am trying to check the /daily on telegram, i get the following error: ``` Traceback (most recent call last): File "/home/mtg/freqtrade/freqtrade/rpc/telegram.py", line 49, in wrapper return command_handler(self, *args, **kwargs) File "/home/mtg/freqtrade/freqtrade/rpc/telegram.py", line 247, in _daily fiat_disp_cur File "/home/mtg/freqtrade/freqtrade/rpc/rpc.py", line 188, in _rpc_daily_profit for key, value in profit_days.items() File "/home/mtg/freqtrade/freqtrade/rpc/rpc.py", line 188, in <listcomp> for key, value in profit_days.items() ValueError: Unknown format code 'f' for object of type 'str' ``` *Explain the problem you have encountered* ### Steps to reproduce: 1. _connect to coinbase pro____ 2. _run strategy____ 3. ___on telegram, hit /daily__ ### Observed Results: * What happened? output error * What did you expect to happen? Result of daily profit/loss ### Relevant code exceptions or logs: ``` // paste your log here ``` Telegram Daily Is not Working on Freqtrade for coinbasepro ## Step 1: Have you search for this issue before posting it? Yes If you have discovered a bug in the bot, please [search our issue tracker](https://github.com/freqtrade/freqtrade/issues?q=is%3Aissue). If it hasn't been reported, please create a new issue. ## Step 2: Describe your environment * Operating system: _Ubuntu___ * Python Version: ___3.6__ (`python -V`) * CCXT version: __ccxt==1.17.542___ (`pip freeze | grep ccxt`) * Branch: Master | Develop * Last Commit ID: a0a9c9c21cdec0d50eb1597264414ed0e4aeb98a(`git log --format="%H" -n 1`) ## Step 3: Describe the problem: Running dry run mode, i am trying to check the /daily on telegram, i get the following error: ``` Traceback (most recent call last): File "/home/mtg/freqtrade/freqtrade/rpc/telegram.py", line 49, in wrapper return command_handler(self, *args, **kwargs) File "/home/mtg/freqtrade/freqtrade/rpc/telegram.py", line 247, in _daily fiat_disp_cur File "/home/mtg/freqtrade/freqtrade/rpc/rpc.py", line 188, in _rpc_daily_profit for key, value in profit_days.items() File "/home/mtg/freqtrade/freqtrade/rpc/rpc.py", line 188, in <listcomp> for key, value in profit_days.items() ValueError: Unknown format code 'f' for object of type 'str' ``` *Explain the problem you have encountered* ### Steps to reproduce: 1. _connect to coinbase pro____ 2. _run strategy____ 3. ___on telegram, hit /daily__ ### Observed Results: * What happened? output error * What did you expect to happen? Result of daily profit/loss ### Relevant code exceptions or logs: ``` // paste your log here ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `freqtrade/rpc/fiat_convert.py` Content: ``` 1 """ 2 Module that define classes to convert Crypto-currency to FIAT 3 e.g BTC to USD 4 """ 5 6 import logging 7 import time 8 from typing import Dict, List 9 10 from coinmarketcap import Market 11 12 from freqtrade.constants import SUPPORTED_FIAT 13 14 15 logger = logging.getLogger(__name__) 16 17 18 class CryptoFiat(object): 19 """ 20 Object to describe what is the price of Crypto-currency in a FIAT 21 """ 22 # Constants 23 CACHE_DURATION = 6 * 60 * 60 # 6 hours 24 25 def __init__(self, crypto_symbol: str, fiat_symbol: str, price: float) -> None: 26 """ 27 Create an object that will contains the price for a crypto-currency in fiat 28 :param crypto_symbol: Crypto-currency you want to convert (e.g BTC) 29 :param fiat_symbol: FIAT currency you want to convert to (e.g USD) 30 :param price: Price in FIAT 31 """ 32 33 # Public attributes 34 self.crypto_symbol = None 35 self.fiat_symbol = None 36 self.price = 0.0 37 38 # Private attributes 39 self._expiration = 0.0 40 41 self.crypto_symbol = crypto_symbol.upper() 42 self.fiat_symbol = fiat_symbol.upper() 43 self.set_price(price=price) 44 45 def set_price(self, price: float) -> None: 46 """ 47 Set the price of the Crypto-currency in FIAT and set the expiration time 48 :param price: Price of the current Crypto currency in the fiat 49 :return: None 50 """ 51 self.price = price 52 self._expiration = time.time() + self.CACHE_DURATION 53 54 def is_expired(self) -> bool: 55 """ 56 Return if the current price is still valid or needs to be refreshed 57 :return: bool, true the price is expired and needs to be refreshed, false the price is 58 still valid 59 """ 60 return self._expiration - time.time() <= 0 61 62 63 class CryptoToFiatConverter(object): 64 """ 65 Main class to initiate Crypto to FIAT. 66 This object contains a list of pair Crypto, FIAT 67 This object is also a Singleton 68 """ 69 __instance = None 70 _coinmarketcap: Market = None 71 72 _cryptomap: Dict = {} 73 74 def __new__(cls): 75 if CryptoToFiatConverter.__instance is None: 76 CryptoToFiatConverter.__instance = object.__new__(cls) 77 try: 78 CryptoToFiatConverter._coinmarketcap = Market() 79 except BaseException: 80 CryptoToFiatConverter._coinmarketcap = None 81 return CryptoToFiatConverter.__instance 82 83 def __init__(self) -> None: 84 self._pairs: List[CryptoFiat] = [] 85 self._load_cryptomap() 86 87 def _load_cryptomap(self) -> None: 88 try: 89 coinlistings = self._coinmarketcap.listings() 90 self._cryptomap = dict(map(lambda coin: (coin["symbol"], str(coin["id"])), 91 coinlistings["data"])) 92 except (BaseException) as exception: 93 logger.error( 94 "Could not load FIAT Cryptocurrency map for the following problem: %s", 95 type(exception).__name__ 96 ) 97 98 def convert_amount(self, crypto_amount: float, crypto_symbol: str, fiat_symbol: str) -> float: 99 """ 100 Convert an amount of crypto-currency to fiat 101 :param crypto_amount: amount of crypto-currency to convert 102 :param crypto_symbol: crypto-currency used 103 :param fiat_symbol: fiat to convert to 104 :return: float, value in fiat of the crypto-currency amount 105 """ 106 if crypto_symbol == fiat_symbol: 107 return crypto_amount 108 price = self.get_price(crypto_symbol=crypto_symbol, fiat_symbol=fiat_symbol) 109 return float(crypto_amount) * float(price) 110 111 def get_price(self, crypto_symbol: str, fiat_symbol: str) -> float: 112 """ 113 Return the price of the Crypto-currency in Fiat 114 :param crypto_symbol: Crypto-currency you want to convert (e.g BTC) 115 :param fiat_symbol: FIAT currency you want to convert to (e.g USD) 116 :return: Price in FIAT 117 """ 118 crypto_symbol = crypto_symbol.upper() 119 fiat_symbol = fiat_symbol.upper() 120 121 # Check if the fiat convertion you want is supported 122 if not self._is_supported_fiat(fiat=fiat_symbol): 123 raise ValueError(f'The fiat {fiat_symbol} is not supported.') 124 125 # Get the pair that interest us and return the price in fiat 126 for pair in self._pairs: 127 if pair.crypto_symbol == crypto_symbol and pair.fiat_symbol == fiat_symbol: 128 # If the price is expired we refresh it, avoid to call the API all the time 129 if pair.is_expired(): 130 pair.set_price( 131 price=self._find_price( 132 crypto_symbol=pair.crypto_symbol, 133 fiat_symbol=pair.fiat_symbol 134 ) 135 ) 136 137 # return the last price we have for this pair 138 return pair.price 139 140 # The pair does not exist, so we create it and return the price 141 return self._add_pair( 142 crypto_symbol=crypto_symbol, 143 fiat_symbol=fiat_symbol, 144 price=self._find_price( 145 crypto_symbol=crypto_symbol, 146 fiat_symbol=fiat_symbol 147 ) 148 ) 149 150 def _add_pair(self, crypto_symbol: str, fiat_symbol: str, price: float) -> float: 151 """ 152 :param crypto_symbol: Crypto-currency you want to convert (e.g BTC) 153 :param fiat_symbol: FIAT currency you want to convert to (e.g USD) 154 :return: price in FIAT 155 """ 156 self._pairs.append( 157 CryptoFiat( 158 crypto_symbol=crypto_symbol, 159 fiat_symbol=fiat_symbol, 160 price=price 161 ) 162 ) 163 164 return price 165 166 def _is_supported_fiat(self, fiat: str) -> bool: 167 """ 168 Check if the FIAT your want to convert to is supported 169 :param fiat: FIAT to check (e.g USD) 170 :return: bool, True supported, False not supported 171 """ 172 173 fiat = fiat.upper() 174 175 return fiat in SUPPORTED_FIAT 176 177 def _find_price(self, crypto_symbol: str, fiat_symbol: str) -> float: 178 """ 179 Call CoinMarketCap API to retrieve the price in the FIAT 180 :param crypto_symbol: Crypto-currency you want to convert (e.g BTC) 181 :param fiat_symbol: FIAT currency you want to convert to (e.g USD) 182 :return: float, price of the crypto-currency in Fiat 183 """ 184 # Check if the fiat convertion you want is supported 185 if not self._is_supported_fiat(fiat=fiat_symbol): 186 raise ValueError(f'The fiat {fiat_symbol} is not supported.') 187 188 # No need to convert if both crypto and fiat are the same 189 if crypto_symbol == fiat_symbol: 190 return 1.0 191 192 if crypto_symbol not in self._cryptomap: 193 # return 0 for unsupported stake currencies (fiat-convert should not break the bot) 194 logger.warning("unsupported crypto-symbol %s - returning 0.0", crypto_symbol) 195 return 0.0 196 197 try: 198 return float( 199 self._coinmarketcap.ticker( 200 currency=self._cryptomap[crypto_symbol], 201 convert=fiat_symbol 202 )['data']['quotes'][fiat_symbol.upper()]['price'] 203 ) 204 except BaseException as exception: 205 logger.error("Error in _find_price: %s", exception) 206 return 0.0 207 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/freqtrade/rpc/fiat_convert.py b/freqtrade/rpc/fiat_convert.py --- a/freqtrade/rpc/fiat_convert.py +++ b/freqtrade/rpc/fiat_convert.py @@ -104,7 +104,7 @@ :return: float, value in fiat of the crypto-currency amount """ if crypto_symbol == fiat_symbol: - return crypto_amount + return float(crypto_amount) price = self.get_price(crypto_symbol=crypto_symbol, fiat_symbol=fiat_symbol) return float(crypto_amount) * float(price)
{"golden_diff": "diff --git a/freqtrade/rpc/fiat_convert.py b/freqtrade/rpc/fiat_convert.py\n--- a/freqtrade/rpc/fiat_convert.py\n+++ b/freqtrade/rpc/fiat_convert.py\n@@ -104,7 +104,7 @@\n :return: float, value in fiat of the crypto-currency amount\n \"\"\"\n if crypto_symbol == fiat_symbol:\n- return crypto_amount\n+ return float(crypto_amount)\n price = self.get_price(crypto_symbol=crypto_symbol, fiat_symbol=fiat_symbol)\n return float(crypto_amount) * float(price)\n", "issue": "Telegram Daily Is not Working on Freqtrade for coinbasepro\n## Step 1: Have you search for this issue before posting it?\r\nYes\r\nIf you have discovered a bug in the bot, please [search our issue tracker](https://github.com/freqtrade/freqtrade/issues?q=is%3Aissue). \r\nIf it hasn't been reported, please create a new issue.\r\n\r\n## Step 2: Describe your environment\r\n\r\n * Operating system: _Ubuntu___\r\n * Python Version: ___3.6__ (`python -V`)\r\n * CCXT version: __ccxt==1.17.542___ (`pip freeze | grep ccxt`)\r\n * Branch: Master | Develop\r\n * Last Commit ID: a0a9c9c21cdec0d50eb1597264414ed0e4aeb98a(`git log --format=\"%H\" -n 1`)\r\n \r\n## Step 3: Describe the problem:\r\nRunning dry run mode, i am trying to check the /daily on telegram, i get the following error:\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/mtg/freqtrade/freqtrade/rpc/telegram.py\", line 49, in wrapper\r\n return command_handler(self, *args, **kwargs)\r\n File \"/home/mtg/freqtrade/freqtrade/rpc/telegram.py\", line 247, in _daily\r\n fiat_disp_cur\r\n File \"/home/mtg/freqtrade/freqtrade/rpc/rpc.py\", line 188, in _rpc_daily_profit\r\n for key, value in profit_days.items()\r\n File \"/home/mtg/freqtrade/freqtrade/rpc/rpc.py\", line 188, in <listcomp>\r\n for key, value in profit_days.items()\r\nValueError: Unknown format code 'f' for object of type 'str'\r\n```\r\n*Explain the problem you have encountered*\r\n\r\n### Steps to reproduce:\r\n\r\n 1. _connect to coinbase pro____\r\n 2. _run strategy____\r\n 3. ___on telegram, hit /daily__\r\n \r\n### Observed Results:\r\n\r\n * What happened? output error\r\n * What did you expect to happen? Result of daily profit/loss\r\n\r\n### Relevant code exceptions or logs:\r\n\r\n ```\r\n // paste your log here\r\n ```\r\n\nTelegram Daily Is not Working on Freqtrade for coinbasepro\n## Step 1: Have you search for this issue before posting it?\r\nYes\r\nIf you have discovered a bug in the bot, please [search our issue tracker](https://github.com/freqtrade/freqtrade/issues?q=is%3Aissue). \r\nIf it hasn't been reported, please create a new issue.\r\n\r\n## Step 2: Describe your environment\r\n\r\n * Operating system: _Ubuntu___\r\n * Python Version: ___3.6__ (`python -V`)\r\n * CCXT version: __ccxt==1.17.542___ (`pip freeze | grep ccxt`)\r\n * Branch: Master | Develop\r\n * Last Commit ID: a0a9c9c21cdec0d50eb1597264414ed0e4aeb98a(`git log --format=\"%H\" -n 1`)\r\n \r\n## Step 3: Describe the problem:\r\nRunning dry run mode, i am trying to check the /daily on telegram, i get the following error:\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/mtg/freqtrade/freqtrade/rpc/telegram.py\", line 49, in wrapper\r\n return command_handler(self, *args, **kwargs)\r\n File \"/home/mtg/freqtrade/freqtrade/rpc/telegram.py\", line 247, in _daily\r\n fiat_disp_cur\r\n File \"/home/mtg/freqtrade/freqtrade/rpc/rpc.py\", line 188, in _rpc_daily_profit\r\n for key, value in profit_days.items()\r\n File \"/home/mtg/freqtrade/freqtrade/rpc/rpc.py\", line 188, in <listcomp>\r\n for key, value in profit_days.items()\r\nValueError: Unknown format code 'f' for object of type 'str'\r\n```\r\n*Explain the problem you have encountered*\r\n\r\n### Steps to reproduce:\r\n\r\n 1. _connect to coinbase pro____\r\n 2. _run strategy____\r\n 3. ___on telegram, hit /daily__\r\n \r\n### Observed Results:\r\n\r\n * What happened? output error\r\n * What did you expect to happen? Result of daily profit/loss\r\n\r\n### Relevant code exceptions or logs:\r\n\r\n ```\r\n // paste your log here\r\n ```\r\n\n", "before_files": [{"content": "\"\"\"\nModule that define classes to convert Crypto-currency to FIAT\ne.g BTC to USD\n\"\"\"\n\nimport logging\nimport time\nfrom typing import Dict, List\n\nfrom coinmarketcap import Market\n\nfrom freqtrade.constants import SUPPORTED_FIAT\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CryptoFiat(object):\n \"\"\"\n Object to describe what is the price of Crypto-currency in a FIAT\n \"\"\"\n # Constants\n CACHE_DURATION = 6 * 60 * 60 # 6 hours\n\n def __init__(self, crypto_symbol: str, fiat_symbol: str, price: float) -> None:\n \"\"\"\n Create an object that will contains the price for a crypto-currency in fiat\n :param crypto_symbol: Crypto-currency you want to convert (e.g BTC)\n :param fiat_symbol: FIAT currency you want to convert to (e.g USD)\n :param price: Price in FIAT\n \"\"\"\n\n # Public attributes\n self.crypto_symbol = None\n self.fiat_symbol = None\n self.price = 0.0\n\n # Private attributes\n self._expiration = 0.0\n\n self.crypto_symbol = crypto_symbol.upper()\n self.fiat_symbol = fiat_symbol.upper()\n self.set_price(price=price)\n\n def set_price(self, price: float) -> None:\n \"\"\"\n Set the price of the Crypto-currency in FIAT and set the expiration time\n :param price: Price of the current Crypto currency in the fiat\n :return: None\n \"\"\"\n self.price = price\n self._expiration = time.time() + self.CACHE_DURATION\n\n def is_expired(self) -> bool:\n \"\"\"\n Return if the current price is still valid or needs to be refreshed\n :return: bool, true the price is expired and needs to be refreshed, false the price is\n still valid\n \"\"\"\n return self._expiration - time.time() <= 0\n\n\nclass CryptoToFiatConverter(object):\n \"\"\"\n Main class to initiate Crypto to FIAT.\n This object contains a list of pair Crypto, FIAT\n This object is also a Singleton\n \"\"\"\n __instance = None\n _coinmarketcap: Market = None\n\n _cryptomap: Dict = {}\n\n def __new__(cls):\n if CryptoToFiatConverter.__instance is None:\n CryptoToFiatConverter.__instance = object.__new__(cls)\n try:\n CryptoToFiatConverter._coinmarketcap = Market()\n except BaseException:\n CryptoToFiatConverter._coinmarketcap = None\n return CryptoToFiatConverter.__instance\n\n def __init__(self) -> None:\n self._pairs: List[CryptoFiat] = []\n self._load_cryptomap()\n\n def _load_cryptomap(self) -> None:\n try:\n coinlistings = self._coinmarketcap.listings()\n self._cryptomap = dict(map(lambda coin: (coin[\"symbol\"], str(coin[\"id\"])),\n coinlistings[\"data\"]))\n except (BaseException) as exception:\n logger.error(\n \"Could not load FIAT Cryptocurrency map for the following problem: %s\",\n type(exception).__name__\n )\n\n def convert_amount(self, crypto_amount: float, crypto_symbol: str, fiat_symbol: str) -> float:\n \"\"\"\n Convert an amount of crypto-currency to fiat\n :param crypto_amount: amount of crypto-currency to convert\n :param crypto_symbol: crypto-currency used\n :param fiat_symbol: fiat to convert to\n :return: float, value in fiat of the crypto-currency amount\n \"\"\"\n if crypto_symbol == fiat_symbol:\n return crypto_amount\n price = self.get_price(crypto_symbol=crypto_symbol, fiat_symbol=fiat_symbol)\n return float(crypto_amount) * float(price)\n\n def get_price(self, crypto_symbol: str, fiat_symbol: str) -> float:\n \"\"\"\n Return the price of the Crypto-currency in Fiat\n :param crypto_symbol: Crypto-currency you want to convert (e.g BTC)\n :param fiat_symbol: FIAT currency you want to convert to (e.g USD)\n :return: Price in FIAT\n \"\"\"\n crypto_symbol = crypto_symbol.upper()\n fiat_symbol = fiat_symbol.upper()\n\n # Check if the fiat convertion you want is supported\n if not self._is_supported_fiat(fiat=fiat_symbol):\n raise ValueError(f'The fiat {fiat_symbol} is not supported.')\n\n # Get the pair that interest us and return the price in fiat\n for pair in self._pairs:\n if pair.crypto_symbol == crypto_symbol and pair.fiat_symbol == fiat_symbol:\n # If the price is expired we refresh it, avoid to call the API all the time\n if pair.is_expired():\n pair.set_price(\n price=self._find_price(\n crypto_symbol=pair.crypto_symbol,\n fiat_symbol=pair.fiat_symbol\n )\n )\n\n # return the last price we have for this pair\n return pair.price\n\n # The pair does not exist, so we create it and return the price\n return self._add_pair(\n crypto_symbol=crypto_symbol,\n fiat_symbol=fiat_symbol,\n price=self._find_price(\n crypto_symbol=crypto_symbol,\n fiat_symbol=fiat_symbol\n )\n )\n\n def _add_pair(self, crypto_symbol: str, fiat_symbol: str, price: float) -> float:\n \"\"\"\n :param crypto_symbol: Crypto-currency you want to convert (e.g BTC)\n :param fiat_symbol: FIAT currency you want to convert to (e.g USD)\n :return: price in FIAT\n \"\"\"\n self._pairs.append(\n CryptoFiat(\n crypto_symbol=crypto_symbol,\n fiat_symbol=fiat_symbol,\n price=price\n )\n )\n\n return price\n\n def _is_supported_fiat(self, fiat: str) -> bool:\n \"\"\"\n Check if the FIAT your want to convert to is supported\n :param fiat: FIAT to check (e.g USD)\n :return: bool, True supported, False not supported\n \"\"\"\n\n fiat = fiat.upper()\n\n return fiat in SUPPORTED_FIAT\n\n def _find_price(self, crypto_symbol: str, fiat_symbol: str) -> float:\n \"\"\"\n Call CoinMarketCap API to retrieve the price in the FIAT\n :param crypto_symbol: Crypto-currency you want to convert (e.g BTC)\n :param fiat_symbol: FIAT currency you want to convert to (e.g USD)\n :return: float, price of the crypto-currency in Fiat\n \"\"\"\n # Check if the fiat convertion you want is supported\n if not self._is_supported_fiat(fiat=fiat_symbol):\n raise ValueError(f'The fiat {fiat_symbol} is not supported.')\n\n # No need to convert if both crypto and fiat are the same\n if crypto_symbol == fiat_symbol:\n return 1.0\n\n if crypto_symbol not in self._cryptomap:\n # return 0 for unsupported stake currencies (fiat-convert should not break the bot)\n logger.warning(\"unsupported crypto-symbol %s - returning 0.0\", crypto_symbol)\n return 0.0\n\n try:\n return float(\n self._coinmarketcap.ticker(\n currency=self._cryptomap[crypto_symbol],\n convert=fiat_symbol\n )['data']['quotes'][fiat_symbol.upper()]['price']\n )\n except BaseException as exception:\n logger.error(\"Error in _find_price: %s\", exception)\n return 0.0\n", "path": "freqtrade/rpc/fiat_convert.py"}], "after_files": [{"content": "\"\"\"\nModule that define classes to convert Crypto-currency to FIAT\ne.g BTC to USD\n\"\"\"\n\nimport logging\nimport time\nfrom typing import Dict, List\n\nfrom coinmarketcap import Market\n\nfrom freqtrade.constants import SUPPORTED_FIAT\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CryptoFiat(object):\n \"\"\"\n Object to describe what is the price of Crypto-currency in a FIAT\n \"\"\"\n # Constants\n CACHE_DURATION = 6 * 60 * 60 # 6 hours\n\n def __init__(self, crypto_symbol: str, fiat_symbol: str, price: float) -> None:\n \"\"\"\n Create an object that will contains the price for a crypto-currency in fiat\n :param crypto_symbol: Crypto-currency you want to convert (e.g BTC)\n :param fiat_symbol: FIAT currency you want to convert to (e.g USD)\n :param price: Price in FIAT\n \"\"\"\n\n # Public attributes\n self.crypto_symbol = None\n self.fiat_symbol = None\n self.price = 0.0\n\n # Private attributes\n self._expiration = 0.0\n\n self.crypto_symbol = crypto_symbol.upper()\n self.fiat_symbol = fiat_symbol.upper()\n self.set_price(price=price)\n\n def set_price(self, price: float) -> None:\n \"\"\"\n Set the price of the Crypto-currency in FIAT and set the expiration time\n :param price: Price of the current Crypto currency in the fiat\n :return: None\n \"\"\"\n self.price = price\n self._expiration = time.time() + self.CACHE_DURATION\n\n def is_expired(self) -> bool:\n \"\"\"\n Return if the current price is still valid or needs to be refreshed\n :return: bool, true the price is expired and needs to be refreshed, false the price is\n still valid\n \"\"\"\n return self._expiration - time.time() <= 0\n\n\nclass CryptoToFiatConverter(object):\n \"\"\"\n Main class to initiate Crypto to FIAT.\n This object contains a list of pair Crypto, FIAT\n This object is also a Singleton\n \"\"\"\n __instance = None\n _coinmarketcap: Market = None\n\n _cryptomap: Dict = {}\n\n def __new__(cls):\n if CryptoToFiatConverter.__instance is None:\n CryptoToFiatConverter.__instance = object.__new__(cls)\n try:\n CryptoToFiatConverter._coinmarketcap = Market()\n except BaseException:\n CryptoToFiatConverter._coinmarketcap = None\n return CryptoToFiatConverter.__instance\n\n def __init__(self) -> None:\n self._pairs: List[CryptoFiat] = []\n self._load_cryptomap()\n\n def _load_cryptomap(self) -> None:\n try:\n coinlistings = self._coinmarketcap.listings()\n self._cryptomap = dict(map(lambda coin: (coin[\"symbol\"], str(coin[\"id\"])),\n coinlistings[\"data\"]))\n except (BaseException) as exception:\n logger.error(\n \"Could not load FIAT Cryptocurrency map for the following problem: %s\",\n type(exception).__name__\n )\n\n def convert_amount(self, crypto_amount: float, crypto_symbol: str, fiat_symbol: str) -> float:\n \"\"\"\n Convert an amount of crypto-currency to fiat\n :param crypto_amount: amount of crypto-currency to convert\n :param crypto_symbol: crypto-currency used\n :param fiat_symbol: fiat to convert to\n :return: float, value in fiat of the crypto-currency amount\n \"\"\"\n if crypto_symbol == fiat_symbol:\n return float(crypto_amount)\n price = self.get_price(crypto_symbol=crypto_symbol, fiat_symbol=fiat_symbol)\n return float(crypto_amount) * float(price)\n\n def get_price(self, crypto_symbol: str, fiat_symbol: str) -> float:\n \"\"\"\n Return the price of the Crypto-currency in Fiat\n :param crypto_symbol: Crypto-currency you want to convert (e.g BTC)\n :param fiat_symbol: FIAT currency you want to convert to (e.g USD)\n :return: Price in FIAT\n \"\"\"\n crypto_symbol = crypto_symbol.upper()\n fiat_symbol = fiat_symbol.upper()\n\n # Check if the fiat convertion you want is supported\n if not self._is_supported_fiat(fiat=fiat_symbol):\n raise ValueError(f'The fiat {fiat_symbol} is not supported.')\n\n # Get the pair that interest us and return the price in fiat\n for pair in self._pairs:\n if pair.crypto_symbol == crypto_symbol and pair.fiat_symbol == fiat_symbol:\n # If the price is expired we refresh it, avoid to call the API all the time\n if pair.is_expired():\n pair.set_price(\n price=self._find_price(\n crypto_symbol=pair.crypto_symbol,\n fiat_symbol=pair.fiat_symbol\n )\n )\n\n # return the last price we have for this pair\n return pair.price\n\n # The pair does not exist, so we create it and return the price\n return self._add_pair(\n crypto_symbol=crypto_symbol,\n fiat_symbol=fiat_symbol,\n price=self._find_price(\n crypto_symbol=crypto_symbol,\n fiat_symbol=fiat_symbol\n )\n )\n\n def _add_pair(self, crypto_symbol: str, fiat_symbol: str, price: float) -> float:\n \"\"\"\n :param crypto_symbol: Crypto-currency you want to convert (e.g BTC)\n :param fiat_symbol: FIAT currency you want to convert to (e.g USD)\n :return: price in FIAT\n \"\"\"\n self._pairs.append(\n CryptoFiat(\n crypto_symbol=crypto_symbol,\n fiat_symbol=fiat_symbol,\n price=price\n )\n )\n\n return price\n\n def _is_supported_fiat(self, fiat: str) -> bool:\n \"\"\"\n Check if the FIAT your want to convert to is supported\n :param fiat: FIAT to check (e.g USD)\n :return: bool, True supported, False not supported\n \"\"\"\n\n fiat = fiat.upper()\n\n return fiat in SUPPORTED_FIAT\n\n def _find_price(self, crypto_symbol: str, fiat_symbol: str) -> float:\n \"\"\"\n Call CoinMarketCap API to retrieve the price in the FIAT\n :param crypto_symbol: Crypto-currency you want to convert (e.g BTC)\n :param fiat_symbol: FIAT currency you want to convert to (e.g USD)\n :return: float, price of the crypto-currency in Fiat\n \"\"\"\n # Check if the fiat convertion you want is supported\n if not self._is_supported_fiat(fiat=fiat_symbol):\n raise ValueError(f'The fiat {fiat_symbol} is not supported.')\n\n # No need to convert if both crypto and fiat are the same\n if crypto_symbol == fiat_symbol:\n return 1.0\n\n if crypto_symbol not in self._cryptomap:\n # return 0 for unsupported stake currencies (fiat-convert should not break the bot)\n logger.warning(\"unsupported crypto-symbol %s - returning 0.0\", crypto_symbol)\n return 0.0\n\n try:\n return float(\n self._coinmarketcap.ticker(\n currency=self._cryptomap[crypto_symbol],\n convert=fiat_symbol\n )['data']['quotes'][fiat_symbol.upper()]['price']\n )\n except BaseException as exception:\n logger.error(\"Error in _find_price: %s\", exception)\n return 0.0\n", "path": "freqtrade/rpc/fiat_convert.py"}]}
3,477
133
gh_patches_debug_24208
rasdani/github-patches
git_diff
liqd__a4-meinberlin-2236
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- changing group of plan in django does not work I cannot change the group of a plan in django. Dropdown is there but changes won't be saved. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `meinberlin/apps/plans/admin.py` Content: ``` 1 from django.contrib import admin 2 3 from . import models 4 5 6 @admin.register(models.Plan) 7 class OfflineEventAdmin(admin.ModelAdmin): 8 list_display = ('__str__', 'organisation', 'created') 9 10 date_hierarchy = 'created' 11 search_fields = ('title',) 12 readonly_fields = ('creator', ) 13 ``` Path: `meinberlin/apps/plans/forms.py` Content: ``` 1 from django import forms 2 from django.conf import settings 3 from django.utils.translation import ugettext_lazy as _ 4 5 from adhocracy4.dashboard.components.forms import ProjectDashboardForm 6 from adhocracy4.maps import widgets as maps_widgets 7 from adhocracy4.projects import models as project_models 8 9 from . import models 10 11 12 class PlanForm(forms.ModelForm): 13 14 class Meta: 15 model = models.Plan 16 fields = [ 17 'title', 18 'description_image', 19 'contact', 20 'point', 21 'point_label', 22 'district', 23 'cost', 24 'description', 25 'topics', 26 'status', 27 'participation'] 28 widgets = { 29 'point': maps_widgets.MapChoosePointWidget( 30 polygon=settings.BERLIN_POLYGON) 31 } 32 error_messages = { 33 'point': { 34 'required': _('Please locate the plan on the map.') 35 } 36 } 37 38 def __init__(self, *args, **kwargs): 39 super().__init__(*args, **kwargs) 40 self.fields['district'].empty_label = _('City wide') 41 42 43 class CustomMultipleChoiceField(forms.ModelMultipleChoiceField): 44 45 widget = forms.Select 46 47 def clean(self, value): 48 if not value: 49 return super().clean([]) 50 return super().clean([value]) 51 52 53 class ProjectPlansDashboardForm(ProjectDashboardForm): 54 plans = CustomMultipleChoiceField(queryset=None, 55 label=_('Plans')) 56 57 class Meta: 58 model = project_models.Project 59 fields = ['plans'] 60 required = False 61 62 def save(self, commit=False): 63 plans = self.cleaned_data['plans'] 64 self.instance.plans.set(plans) 65 66 def __init__(self, *args, **kwargs): 67 super().__init__(*args, **kwargs) 68 self.initial['plans'] = self.instance.plans.all() 69 self.fields['plans'].required = False 70 self.fields['plans'].empty_label = '----------' 71 self.fields['plans'].queryset = \ 72 self.instance.organisation.plan_set.all() 73 ``` Path: `meinberlin/apps/plans/models.py` Content: ``` 1 from ckeditor.fields import RichTextField 2 from django.conf import settings 3 from django.contrib.auth.models import Group 4 from django.core.urlresolvers import reverse 5 from django.db import models 6 from django.utils.functional import cached_property 7 from django.utils.translation import ugettext_lazy as _ 8 9 from adhocracy4 import transforms 10 from adhocracy4.administrative_districts.models import AdministrativeDistrict 11 from adhocracy4.images.fields import ConfiguredImageField 12 from adhocracy4.maps import fields as map_fields 13 from adhocracy4.models.base import UserGeneratedContentModel 14 from adhocracy4.phases.models import Phase 15 from adhocracy4.projects import models as project_models 16 from adhocracy4.projects.fields import TopicField 17 18 19 class Plan(UserGeneratedContentModel): 20 21 PARTICIPATION_YES = 0 22 PARTICIPATION_NO = 1 23 PARTICIPATION_UNDECIDED = 2 24 PARTICIPATION_CHOICES = ( 25 (PARTICIPATION_YES, _('with')), 26 (PARTICIPATION_NO, _('without')), 27 (PARTICIPATION_UNDECIDED, _('undecided')), 28 ) 29 30 STATUS_ONGOING = 0 31 STATUS_DONE = 1 32 33 STATUS_CHOICES = ( 34 (STATUS_ONGOING, _('running')), 35 (STATUS_DONE, _('done')) 36 ) 37 38 title = models.CharField(max_length=120, verbose_name=_('Title')) 39 organisation = models.ForeignKey( 40 settings.A4_ORGANISATIONS_MODEL, 41 on_delete=models.CASCADE, 42 verbose_name=_('Organisation')) 43 projects = models.ManyToManyField( 44 project_models.Project, 45 related_name='plans', 46 blank=True 47 ) 48 group = models.ForeignKey( 49 Group, 50 on_delete=models.SET_NULL, 51 blank=True, 52 null=True) 53 point = map_fields.PointField( 54 verbose_name=_('Where can the plan be located on a map?'), 55 help_text=_('Click inside the marked area ' 56 'or type in an address to set the marker. A set ' 57 'marker can be dragged when pressed.')) 58 point_label = models.CharField( 59 default='', 60 max_length=255, 61 verbose_name=_('Label of the location'), 62 help_text=_('The label of the location is ' 63 'displayed in the detail view of the plan'), 64 ) 65 district = models.ForeignKey( 66 AdministrativeDistrict, 67 verbose_name=_('District'), 68 null=True, 69 blank=True 70 ) 71 contact = models.TextField(max_length=1000, verbose_name=_('Contact')) 72 cost = models.CharField( 73 blank=True, 74 null=True, 75 max_length=255, 76 verbose_name=_('Cost')) 77 description = RichTextField(verbose_name=_('Description')) 78 description_image = ConfiguredImageField( 79 'plan_image', 80 verbose_name=_('Add image'), 81 upload_to='plan/description_image', 82 blank=True, 83 help_prefix=_( 84 'Visualize your plan.' 85 ), 86 ) 87 topics = TopicField( 88 verbose_name=_('Topics'), 89 help_text=_('Add topics to your project.')) 90 status = models.SmallIntegerField( 91 choices=STATUS_CHOICES, 92 verbose_name=_('Status') 93 ) 94 participation = models.SmallIntegerField( 95 choices=PARTICIPATION_CHOICES, 96 verbose_name=_('Participation') 97 ) 98 99 class Meta: 100 ordering = ['-created'] 101 102 @property 103 def reference_number(self): 104 return '{:d}-{:05d}'.format(self.created.year, self.pk) 105 106 @property 107 def administrative_district(self): 108 return self.district 109 110 @property 111 def topic_names(self): 112 if hasattr(settings, 'A4_PROJECT_TOPICS'): 113 choices = dict(settings.A4_PROJECT_TOPICS) 114 return [choices[topic] for topic in self.topics] 115 return [] 116 117 @cached_property 118 def published_projects(self): 119 return self.projects.filter( 120 is_draft=False, is_public=True, is_archived=False) 121 122 @cached_property 123 def participation_string(self): 124 project_list = self.published_projects.values_list('id', flat=True) 125 phases_in_plan = Phase.objects\ 126 .select_related('module__project')\ 127 .filter(module__project_id__in=project_list)\ 128 .order_by('-start_date') 129 130 if phases_in_plan.active_phases(): 131 return _('running') 132 133 future_phases_with_start_date = phases_in_plan.future_phases()\ 134 .exclude(start_date__isnull=True) 135 136 if future_phases_with_start_date: 137 future_phase = future_phases_with_start_date.first() 138 return _('starts at {}')\ 139 .format(future_phase.start_date.strftime('%d.%m.%Y')) 140 141 def __str__(self): 142 return self.title 143 144 def get_absolute_url(self): 145 return reverse('meinberlin_plans:plan-detail', 146 kwargs=dict(pk='{:05d}'.format(self.pk), 147 year=self.created.year)) 148 149 def save(self, *args, **kwargs): 150 self.description = transforms.clean_html_field(self.description) 151 self.group = self._get_group(self.creator, self.organisation) 152 super().save(*args, **kwargs) 153 154 def _get_group(self, user, organisation): 155 user_groups = user.groups.all() 156 org_groups = organisation.groups.all() 157 shared_groups = user_groups & org_groups 158 return shared_groups.distinct().first() 159 160 def is_group_member(self, user): 161 if self.group: 162 return user.groups.filter(id=self.group.id).exists() 163 return False 164 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/meinberlin/apps/plans/admin.py b/meinberlin/apps/plans/admin.py --- a/meinberlin/apps/plans/admin.py +++ b/meinberlin/apps/plans/admin.py @@ -4,7 +4,7 @@ @admin.register(models.Plan) -class OfflineEventAdmin(admin.ModelAdmin): +class PlanAdmin(admin.ModelAdmin): list_display = ('__str__', 'organisation', 'created') date_hierarchy = 'created' diff --git a/meinberlin/apps/plans/forms.py b/meinberlin/apps/plans/forms.py --- a/meinberlin/apps/plans/forms.py +++ b/meinberlin/apps/plans/forms.py @@ -39,6 +39,14 @@ super().__init__(*args, **kwargs) self.fields['district'].empty_label = _('City wide') + def save(self, commit=True): + plan = super().save(commit=False) + group = plan._get_group(plan.creator, plan.organisation) + plan.group = group + if commit: + plan.save() + return plan + class CustomMultipleChoiceField(forms.ModelMultipleChoiceField): diff --git a/meinberlin/apps/plans/models.py b/meinberlin/apps/plans/models.py --- a/meinberlin/apps/plans/models.py +++ b/meinberlin/apps/plans/models.py @@ -148,7 +148,6 @@ def save(self, *args, **kwargs): self.description = transforms.clean_html_field(self.description) - self.group = self._get_group(self.creator, self.organisation) super().save(*args, **kwargs) def _get_group(self, user, organisation):
{"golden_diff": "diff --git a/meinberlin/apps/plans/admin.py b/meinberlin/apps/plans/admin.py\n--- a/meinberlin/apps/plans/admin.py\n+++ b/meinberlin/apps/plans/admin.py\n@@ -4,7 +4,7 @@\n \n \n @admin.register(models.Plan)\n-class OfflineEventAdmin(admin.ModelAdmin):\n+class PlanAdmin(admin.ModelAdmin):\n list_display = ('__str__', 'organisation', 'created')\n \n date_hierarchy = 'created'\ndiff --git a/meinberlin/apps/plans/forms.py b/meinberlin/apps/plans/forms.py\n--- a/meinberlin/apps/plans/forms.py\n+++ b/meinberlin/apps/plans/forms.py\n@@ -39,6 +39,14 @@\n super().__init__(*args, **kwargs)\n self.fields['district'].empty_label = _('City wide')\n \n+ def save(self, commit=True):\n+ plan = super().save(commit=False)\n+ group = plan._get_group(plan.creator, plan.organisation)\n+ plan.group = group\n+ if commit:\n+ plan.save()\n+ return plan\n+\n \n class CustomMultipleChoiceField(forms.ModelMultipleChoiceField):\n \ndiff --git a/meinberlin/apps/plans/models.py b/meinberlin/apps/plans/models.py\n--- a/meinberlin/apps/plans/models.py\n+++ b/meinberlin/apps/plans/models.py\n@@ -148,7 +148,6 @@\n \n def save(self, *args, **kwargs):\n self.description = transforms.clean_html_field(self.description)\n- self.group = self._get_group(self.creator, self.organisation)\n super().save(*args, **kwargs)\n \n def _get_group(self, user, organisation):\n", "issue": "changing group of plan in django does not work\nI cannot change the group of a plan in django. Dropdown is there but changes won't be saved.\n", "before_files": [{"content": "from django.contrib import admin\n\nfrom . import models\n\n\[email protected](models.Plan)\nclass OfflineEventAdmin(admin.ModelAdmin):\n list_display = ('__str__', 'organisation', 'created')\n\n date_hierarchy = 'created'\n search_fields = ('title',)\n readonly_fields = ('creator', )\n", "path": "meinberlin/apps/plans/admin.py"}, {"content": "from django import forms\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.components.forms import ProjectDashboardForm\nfrom adhocracy4.maps import widgets as maps_widgets\nfrom adhocracy4.projects import models as project_models\n\nfrom . import models\n\n\nclass PlanForm(forms.ModelForm):\n\n class Meta:\n model = models.Plan\n fields = [\n 'title',\n 'description_image',\n 'contact',\n 'point',\n 'point_label',\n 'district',\n 'cost',\n 'description',\n 'topics',\n 'status',\n 'participation']\n widgets = {\n 'point': maps_widgets.MapChoosePointWidget(\n polygon=settings.BERLIN_POLYGON)\n }\n error_messages = {\n 'point': {\n 'required': _('Please locate the plan on the map.')\n }\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['district'].empty_label = _('City wide')\n\n\nclass CustomMultipleChoiceField(forms.ModelMultipleChoiceField):\n\n widget = forms.Select\n\n def clean(self, value):\n if not value:\n return super().clean([])\n return super().clean([value])\n\n\nclass ProjectPlansDashboardForm(ProjectDashboardForm):\n plans = CustomMultipleChoiceField(queryset=None,\n label=_('Plans'))\n\n class Meta:\n model = project_models.Project\n fields = ['plans']\n required = False\n\n def save(self, commit=False):\n plans = self.cleaned_data['plans']\n self.instance.plans.set(plans)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.initial['plans'] = self.instance.plans.all()\n self.fields['plans'].required = False\n self.fields['plans'].empty_label = '----------'\n self.fields['plans'].queryset = \\\n self.instance.organisation.plan_set.all()\n", "path": "meinberlin/apps/plans/forms.py"}, {"content": "from ckeditor.fields import RichTextField\nfrom django.conf import settings\nfrom django.contrib.auth.models import Group\nfrom django.core.urlresolvers import reverse\nfrom django.db import models\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4 import transforms\nfrom adhocracy4.administrative_districts.models import AdministrativeDistrict\nfrom adhocracy4.images.fields import ConfiguredImageField\nfrom adhocracy4.maps import fields as map_fields\nfrom adhocracy4.models.base import UserGeneratedContentModel\nfrom adhocracy4.phases.models import Phase\nfrom adhocracy4.projects import models as project_models\nfrom adhocracy4.projects.fields import TopicField\n\n\nclass Plan(UserGeneratedContentModel):\n\n PARTICIPATION_YES = 0\n PARTICIPATION_NO = 1\n PARTICIPATION_UNDECIDED = 2\n PARTICIPATION_CHOICES = (\n (PARTICIPATION_YES, _('with')),\n (PARTICIPATION_NO, _('without')),\n (PARTICIPATION_UNDECIDED, _('undecided')),\n )\n\n STATUS_ONGOING = 0\n STATUS_DONE = 1\n\n STATUS_CHOICES = (\n (STATUS_ONGOING, _('running')),\n (STATUS_DONE, _('done'))\n )\n\n title = models.CharField(max_length=120, verbose_name=_('Title'))\n organisation = models.ForeignKey(\n settings.A4_ORGANISATIONS_MODEL,\n on_delete=models.CASCADE,\n verbose_name=_('Organisation'))\n projects = models.ManyToManyField(\n project_models.Project,\n related_name='plans',\n blank=True\n )\n group = models.ForeignKey(\n Group,\n on_delete=models.SET_NULL,\n blank=True,\n null=True)\n point = map_fields.PointField(\n verbose_name=_('Where can the plan be located on a map?'),\n help_text=_('Click inside the marked area '\n 'or type in an address to set the marker. A set '\n 'marker can be dragged when pressed.'))\n point_label = models.CharField(\n default='',\n max_length=255,\n verbose_name=_('Label of the location'),\n help_text=_('The label of the location is '\n 'displayed in the detail view of the plan'),\n )\n district = models.ForeignKey(\n AdministrativeDistrict,\n verbose_name=_('District'),\n null=True,\n blank=True\n )\n contact = models.TextField(max_length=1000, verbose_name=_('Contact'))\n cost = models.CharField(\n blank=True,\n null=True,\n max_length=255,\n verbose_name=_('Cost'))\n description = RichTextField(verbose_name=_('Description'))\n description_image = ConfiguredImageField(\n 'plan_image',\n verbose_name=_('Add image'),\n upload_to='plan/description_image',\n blank=True,\n help_prefix=_(\n 'Visualize your plan.'\n ),\n )\n topics = TopicField(\n verbose_name=_('Topics'),\n help_text=_('Add topics to your project.'))\n status = models.SmallIntegerField(\n choices=STATUS_CHOICES,\n verbose_name=_('Status')\n )\n participation = models.SmallIntegerField(\n choices=PARTICIPATION_CHOICES,\n verbose_name=_('Participation')\n )\n\n class Meta:\n ordering = ['-created']\n\n @property\n def reference_number(self):\n return '{:d}-{:05d}'.format(self.created.year, self.pk)\n\n @property\n def administrative_district(self):\n return self.district\n\n @property\n def topic_names(self):\n if hasattr(settings, 'A4_PROJECT_TOPICS'):\n choices = dict(settings.A4_PROJECT_TOPICS)\n return [choices[topic] for topic in self.topics]\n return []\n\n @cached_property\n def published_projects(self):\n return self.projects.filter(\n is_draft=False, is_public=True, is_archived=False)\n\n @cached_property\n def participation_string(self):\n project_list = self.published_projects.values_list('id', flat=True)\n phases_in_plan = Phase.objects\\\n .select_related('module__project')\\\n .filter(module__project_id__in=project_list)\\\n .order_by('-start_date')\n\n if phases_in_plan.active_phases():\n return _('running')\n\n future_phases_with_start_date = phases_in_plan.future_phases()\\\n .exclude(start_date__isnull=True)\n\n if future_phases_with_start_date:\n future_phase = future_phases_with_start_date.first()\n return _('starts at {}')\\\n .format(future_phase.start_date.strftime('%d.%m.%Y'))\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n return reverse('meinberlin_plans:plan-detail',\n kwargs=dict(pk='{:05d}'.format(self.pk),\n year=self.created.year))\n\n def save(self, *args, **kwargs):\n self.description = transforms.clean_html_field(self.description)\n self.group = self._get_group(self.creator, self.organisation)\n super().save(*args, **kwargs)\n\n def _get_group(self, user, organisation):\n user_groups = user.groups.all()\n org_groups = organisation.groups.all()\n shared_groups = user_groups & org_groups\n return shared_groups.distinct().first()\n\n def is_group_member(self, user):\n if self.group:\n return user.groups.filter(id=self.group.id).exists()\n return False\n", "path": "meinberlin/apps/plans/models.py"}], "after_files": [{"content": "from django.contrib import admin\n\nfrom . import models\n\n\[email protected](models.Plan)\nclass PlanAdmin(admin.ModelAdmin):\n list_display = ('__str__', 'organisation', 'created')\n\n date_hierarchy = 'created'\n search_fields = ('title',)\n readonly_fields = ('creator', )\n", "path": "meinberlin/apps/plans/admin.py"}, {"content": "from django import forms\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.components.forms import ProjectDashboardForm\nfrom adhocracy4.maps import widgets as maps_widgets\nfrom adhocracy4.projects import models as project_models\n\nfrom . import models\n\n\nclass PlanForm(forms.ModelForm):\n\n class Meta:\n model = models.Plan\n fields = [\n 'title',\n 'description_image',\n 'contact',\n 'point',\n 'point_label',\n 'district',\n 'cost',\n 'description',\n 'topics',\n 'status',\n 'participation']\n widgets = {\n 'point': maps_widgets.MapChoosePointWidget(\n polygon=settings.BERLIN_POLYGON)\n }\n error_messages = {\n 'point': {\n 'required': _('Please locate the plan on the map.')\n }\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['district'].empty_label = _('City wide')\n\n def save(self, commit=True):\n plan = super().save(commit=False)\n group = plan._get_group(plan.creator, plan.organisation)\n plan.group = group\n if commit:\n plan.save()\n return plan\n\n\nclass CustomMultipleChoiceField(forms.ModelMultipleChoiceField):\n\n widget = forms.Select\n\n def clean(self, value):\n if not value:\n return super().clean([])\n return super().clean([value])\n\n\nclass ProjectPlansDashboardForm(ProjectDashboardForm):\n plans = CustomMultipleChoiceField(queryset=None,\n label=_('Plans'))\n\n class Meta:\n model = project_models.Project\n fields = ['plans']\n required = False\n\n def save(self, commit=False):\n plans = self.cleaned_data['plans']\n self.instance.plans.set(plans)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.initial['plans'] = self.instance.plans.all()\n self.fields['plans'].required = False\n self.fields['plans'].empty_label = '----------'\n self.fields['plans'].queryset = \\\n self.instance.organisation.plan_set.all()\n", "path": "meinberlin/apps/plans/forms.py"}, {"content": "from ckeditor.fields import RichTextField\nfrom django.conf import settings\nfrom django.contrib.auth.models import Group\nfrom django.core.urlresolvers import reverse\nfrom django.db import models\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4 import transforms\nfrom adhocracy4.administrative_districts.models import AdministrativeDistrict\nfrom adhocracy4.images.fields import ConfiguredImageField\nfrom adhocracy4.maps import fields as map_fields\nfrom adhocracy4.models.base import UserGeneratedContentModel\nfrom adhocracy4.phases.models import Phase\nfrom adhocracy4.projects import models as project_models\nfrom adhocracy4.projects.fields import TopicField\n\n\nclass Plan(UserGeneratedContentModel):\n\n PARTICIPATION_YES = 0\n PARTICIPATION_NO = 1\n PARTICIPATION_UNDECIDED = 2\n PARTICIPATION_CHOICES = (\n (PARTICIPATION_YES, _('with')),\n (PARTICIPATION_NO, _('without')),\n (PARTICIPATION_UNDECIDED, _('undecided')),\n )\n\n STATUS_ONGOING = 0\n STATUS_DONE = 1\n\n STATUS_CHOICES = (\n (STATUS_ONGOING, _('running')),\n (STATUS_DONE, _('done'))\n )\n\n title = models.CharField(max_length=120, verbose_name=_('Title'))\n organisation = models.ForeignKey(\n settings.A4_ORGANISATIONS_MODEL,\n on_delete=models.CASCADE,\n verbose_name=_('Organisation'))\n projects = models.ManyToManyField(\n project_models.Project,\n related_name='plans',\n blank=True\n )\n group = models.ForeignKey(\n Group,\n on_delete=models.SET_NULL,\n blank=True,\n null=True)\n point = map_fields.PointField(\n verbose_name=_('Where can the plan be located on a map?'),\n help_text=_('Click inside the marked area '\n 'or type in an address to set the marker. A set '\n 'marker can be dragged when pressed.'))\n point_label = models.CharField(\n default='',\n max_length=255,\n verbose_name=_('Label of the location'),\n help_text=_('The label of the location is '\n 'displayed in the detail view of the plan'),\n )\n district = models.ForeignKey(\n AdministrativeDistrict,\n verbose_name=_('District'),\n null=True,\n blank=True\n )\n contact = models.TextField(max_length=1000, verbose_name=_('Contact'))\n cost = models.CharField(\n blank=True,\n null=True,\n max_length=255,\n verbose_name=_('Cost'))\n description = RichTextField(verbose_name=_('Description'))\n description_image = ConfiguredImageField(\n 'plan_image',\n verbose_name=_('Add image'),\n upload_to='plan/description_image',\n blank=True,\n help_prefix=_(\n 'Visualize your plan.'\n ),\n )\n topics = TopicField(\n verbose_name=_('Topics'),\n help_text=_('Add topics to your project.'))\n status = models.SmallIntegerField(\n choices=STATUS_CHOICES,\n verbose_name=_('Status')\n )\n participation = models.SmallIntegerField(\n choices=PARTICIPATION_CHOICES,\n verbose_name=_('Participation')\n )\n\n class Meta:\n ordering = ['-created']\n\n @property\n def reference_number(self):\n return '{:d}-{:05d}'.format(self.created.year, self.pk)\n\n @property\n def administrative_district(self):\n return self.district\n\n @property\n def topic_names(self):\n if hasattr(settings, 'A4_PROJECT_TOPICS'):\n choices = dict(settings.A4_PROJECT_TOPICS)\n return [choices[topic] for topic in self.topics]\n return []\n\n @cached_property\n def published_projects(self):\n return self.projects.filter(\n is_draft=False, is_public=True, is_archived=False)\n\n @cached_property\n def participation_string(self):\n project_list = self.published_projects.values_list('id', flat=True)\n phases_in_plan = Phase.objects\\\n .select_related('module__project')\\\n .filter(module__project_id__in=project_list)\\\n .order_by('-start_date')\n\n if phases_in_plan.active_phases():\n return _('running')\n\n future_phases_with_start_date = phases_in_plan.future_phases()\\\n .exclude(start_date__isnull=True)\n\n if future_phases_with_start_date:\n future_phase = future_phases_with_start_date.first()\n return _('starts at {}')\\\n .format(future_phase.start_date.strftime('%d.%m.%Y'))\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n return reverse('meinberlin_plans:plan-detail',\n kwargs=dict(pk='{:05d}'.format(self.pk),\n year=self.created.year))\n\n def save(self, *args, **kwargs):\n self.description = transforms.clean_html_field(self.description)\n super().save(*args, **kwargs)\n\n def _get_group(self, user, organisation):\n user_groups = user.groups.all()\n org_groups = organisation.groups.all()\n shared_groups = user_groups & org_groups\n return shared_groups.distinct().first()\n\n def is_group_member(self, user):\n if self.group:\n return user.groups.filter(id=self.group.id).exists()\n return False\n", "path": "meinberlin/apps/plans/models.py"}]}
2,526
380
gh_patches_debug_34435
rasdani/github-patches
git_diff
lk-geimfari__mimesis-923
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add UUID objects support for uuid() - [x] Add parameter `as_object` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mimesis/__init__.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 """Mimesis is a Python library, which helps generate fake data. 4 5 Copyright (c) 2016 - 2020 Isaak Uchakaev (Likid Geimfari) 6 Repository: https://github.com/lk-geimfari/mimesis 7 Email: <[email protected]> 8 """ 9 10 from mimesis.providers import ( 11 Address, 12 BaseDataProvider, 13 BaseProvider, 14 Business, 15 Choice, 16 Clothing, 17 Code, 18 Cryptographic, 19 Datetime, 20 Development, 21 File, 22 Food, 23 Generic, 24 Hardware, 25 Internet, 26 Numbers, 27 Path, 28 Payment, 29 Person, 30 Science, 31 Structure, 32 Text, 33 Transport, 34 UnitSystem, 35 ) 36 37 __all__ = [ 38 'Address', 39 'BaseDataProvider', 40 'BaseProvider', 41 'Business', 42 'Clothing', 43 'Code', 44 'Choice', 45 'Datetime', 46 'Development', 47 'File', 48 'Food', 49 'Hardware', 50 'Internet', 51 'Numbers', 52 'Path', 53 'Payment', 54 'Person', 55 'Science', 56 'Structure', 57 'Text', 58 'Transport', 59 'UnitSystem', 60 'Cryptographic', 61 62 # Has all: 63 'Generic', 64 65 # Meta: 66 '__version__', 67 '__title__', 68 '__description__', 69 '__url__', 70 '__author__', 71 '__author_email__', 72 '__license__', 73 ] 74 75 __version__ = '4.1.0' 76 __title__ = 'mimesis' 77 __description__ = 'Mimesis: fake data generator.' 78 __url__ = 'https://github.com/lk-geimfari/mimesis' 79 __author__ = 'Isaak Uchakaev (Likid Geimfari)' 80 __author_email__ = '[email protected]' 81 __license__ = 'MIT License' 82 ``` Path: `mimesis/providers/cryptographic.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 """Cryptographic data provider.""" 4 5 import hashlib 6 import secrets 7 from typing import Optional, Union 8 from uuid import UUID 9 10 from mimesis.enums import Algorithm 11 from mimesis.providers.base import BaseProvider 12 from mimesis.providers.text import Text 13 14 __all__ = ['Cryptographic'] 15 16 17 class Cryptographic(BaseProvider): 18 """Class that provides cryptographic data.""" 19 20 def __init__(self, *args, **kwargs) -> None: 21 """Initialize attributes. 22 23 :param seed: Seed. 24 """ 25 super().__init__(*args, **kwargs) 26 self.__words = Text('en')._data.get('words', {}) 27 28 class Meta: 29 """Class for metadata.""" 30 31 name = 'cryptographic' 32 33 def uuid(self, version: int = None, 34 as_object: bool = False) -> Union[UUID, str]: 35 """Generate random UUID. 36 37 This method returns string by default, 38 but yoy can make it return uuid.UUID object using 39 parameter **as_object** 40 41 :param as_object: Returns uuid.UUID object instead of string. 42 :param version: UUID version. 43 :return: UUID. 44 """ 45 bits = self.random.getrandbits(128) 46 uuid_obj = UUID(int=bits, version=version) 47 48 if not as_object: 49 return str(uuid_obj) 50 51 return uuid_obj 52 53 def hash(self, algorithm: Algorithm = None) -> str: # noqa: A003 54 """Generate random hash. 55 56 To change hashing algorithm, pass parameter ``algorithm`` 57 with needed value of the enum object :class:`~mimesis.enums.Algorithm` 58 59 :param algorithm: Enum object :class:`~mimesis.enums.Algorithm`. 60 :return: Hash. 61 :raises NonEnumerableError: When algorithm is unsupported. 62 """ 63 key = self._validate_enum(algorithm, Algorithm) 64 65 if hasattr(hashlib, key): 66 fn = getattr(hashlib, key) 67 return fn(self.uuid().encode()).hexdigest() 68 69 @staticmethod 70 def token_bytes(entropy: int = 32) -> bytes: 71 """Generate byte string containing ``entropy`` bytes. 72 73 The string has ``entropy`` random bytes, each byte 74 converted to two hex digits. 75 76 .. warning:: Seed is not applicable to this method, 77 because of its cryptographic-safe nature. 78 79 :param entropy: Number of bytes (default: 32). 80 :return: Random bytes. 81 """ 82 return secrets.token_bytes(entropy) 83 84 @staticmethod 85 def token_hex(entropy: int = 32) -> str: 86 """Return a random text string, in hexadecimal. 87 88 The string has *entropy* random bytes, each byte converted to two 89 hex digits. If *entropy* is ``None`` or not supplied, a reasonable 90 default is used. 91 92 .. warning:: Seed is not applicable to this method, 93 because of its cryptographic-safe nature. 94 95 :param entropy: Number of bytes (default: 32). 96 :return: Token. 97 """ 98 return secrets.token_hex(entropy) 99 100 @staticmethod 101 def token_urlsafe(entropy: int = 32): 102 """Return a random URL-safe text string, in Base64 encoding. 103 104 The string has *entropy* random bytes. If *entropy* is ``None`` 105 or not supplied, a reasonable default is used. 106 107 .. warning:: Seed is not applicable to this method, 108 because of its cryptographic-safe nature. 109 110 :param entropy: Number of bytes (default: 32). 111 :return: URL-safe token. 112 """ 113 return secrets.token_urlsafe(entropy) 114 115 def mnemonic_phrase(self, length: int = 12, 116 separator: Optional[str] = None) -> str: 117 """Generate pseudo mnemonic phrase. 118 119 Please, keep in mind that this method generates 120 crypto-insecure values. 121 122 :param separator: Separator of phrases (Default is " "). 123 :param length: Number of words. 124 :return: Mnemonic phrase. 125 """ 126 if not separator: 127 separator = ' ' 128 129 words = self.__words['normal'] 130 words_generator = (self.random.choice(words) for _ in range(length)) 131 return '{}'.format(separator).join(words_generator) 132 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mimesis/__init__.py b/mimesis/__init__.py --- a/mimesis/__init__.py +++ b/mimesis/__init__.py @@ -72,7 +72,7 @@ '__license__', ] -__version__ = '4.1.0' +__version__ = '4.1.1' __title__ = 'mimesis' __description__ = 'Mimesis: fake data generator.' __url__ = 'https://github.com/lk-geimfari/mimesis' diff --git a/mimesis/providers/cryptographic.py b/mimesis/providers/cryptographic.py --- a/mimesis/providers/cryptographic.py +++ b/mimesis/providers/cryptographic.py @@ -5,7 +5,7 @@ import hashlib import secrets from typing import Optional, Union -from uuid import UUID +from uuid import UUID, uuid4 from mimesis.enums import Algorithm from mimesis.providers.base import BaseProvider @@ -30,25 +30,23 @@ name = 'cryptographic' - def uuid(self, version: int = None, - as_object: bool = False) -> Union[UUID, str]: - """Generate random UUID. + @staticmethod + def uuid(as_object: bool = False) -> Union[UUID, str]: + """Generate random UUID4. This method returns string by default, - but yoy can make it return uuid.UUID object using + but you can make it return uuid.UUID object using parameter **as_object** - :param as_object: Returns uuid.UUID object instead of string. - :param version: UUID version. + :param as_object: Returns uuid.UUID. :return: UUID. """ - bits = self.random.getrandbits(128) - uuid_obj = UUID(int=bits, version=version) + _uuid = uuid4() if not as_object: - return str(uuid_obj) + return str(_uuid) - return uuid_obj + return _uuid def hash(self, algorithm: Algorithm = None) -> str: # noqa: A003 """Generate random hash. @@ -64,7 +62,7 @@ if hasattr(hashlib, key): fn = getattr(hashlib, key) - return fn(self.uuid().encode()).hexdigest() + return fn(self.uuid().encode()).hexdigest() # type: ignore @staticmethod def token_bytes(entropy: int = 32) -> bytes:
{"golden_diff": "diff --git a/mimesis/__init__.py b/mimesis/__init__.py\n--- a/mimesis/__init__.py\n+++ b/mimesis/__init__.py\n@@ -72,7 +72,7 @@\n '__license__',\n ]\n \n-__version__ = '4.1.0'\n+__version__ = '4.1.1'\n __title__ = 'mimesis'\n __description__ = 'Mimesis: fake data generator.'\n __url__ = 'https://github.com/lk-geimfari/mimesis'\ndiff --git a/mimesis/providers/cryptographic.py b/mimesis/providers/cryptographic.py\n--- a/mimesis/providers/cryptographic.py\n+++ b/mimesis/providers/cryptographic.py\n@@ -5,7 +5,7 @@\n import hashlib\n import secrets\n from typing import Optional, Union\n-from uuid import UUID\n+from uuid import UUID, uuid4\n \n from mimesis.enums import Algorithm\n from mimesis.providers.base import BaseProvider\n@@ -30,25 +30,23 @@\n \n name = 'cryptographic'\n \n- def uuid(self, version: int = None,\n- as_object: bool = False) -> Union[UUID, str]:\n- \"\"\"Generate random UUID.\n+ @staticmethod\n+ def uuid(as_object: bool = False) -> Union[UUID, str]:\n+ \"\"\"Generate random UUID4.\n \n This method returns string by default,\n- but yoy can make it return uuid.UUID object using\n+ but you can make it return uuid.UUID object using\n parameter **as_object**\n \n- :param as_object: Returns uuid.UUID object instead of string.\n- :param version: UUID version.\n+ :param as_object: Returns uuid.UUID.\n :return: UUID.\n \"\"\"\n- bits = self.random.getrandbits(128)\n- uuid_obj = UUID(int=bits, version=version)\n+ _uuid = uuid4()\n \n if not as_object:\n- return str(uuid_obj)\n+ return str(_uuid)\n \n- return uuid_obj\n+ return _uuid\n \n def hash(self, algorithm: Algorithm = None) -> str: # noqa: A003\n \"\"\"Generate random hash.\n@@ -64,7 +62,7 @@\n \n if hasattr(hashlib, key):\n fn = getattr(hashlib, key)\n- return fn(self.uuid().encode()).hexdigest()\n+ return fn(self.uuid().encode()).hexdigest() # type: ignore\n \n @staticmethod\n def token_bytes(entropy: int = 32) -> bytes:\n", "issue": "Add UUID objects support for uuid()\n- [x] Add parameter `as_object` \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Mimesis is a Python library, which helps generate fake data.\n\nCopyright (c) 2016 - 2020 Isaak Uchakaev (Likid Geimfari)\nRepository: https://github.com/lk-geimfari/mimesis\nEmail: <[email protected]>\n\"\"\"\n\nfrom mimesis.providers import (\n Address,\n BaseDataProvider,\n BaseProvider,\n Business,\n Choice,\n Clothing,\n Code,\n Cryptographic,\n Datetime,\n Development,\n File,\n Food,\n Generic,\n Hardware,\n Internet,\n Numbers,\n Path,\n Payment,\n Person,\n Science,\n Structure,\n Text,\n Transport,\n UnitSystem,\n)\n\n__all__ = [\n 'Address',\n 'BaseDataProvider',\n 'BaseProvider',\n 'Business',\n 'Clothing',\n 'Code',\n 'Choice',\n 'Datetime',\n 'Development',\n 'File',\n 'Food',\n 'Hardware',\n 'Internet',\n 'Numbers',\n 'Path',\n 'Payment',\n 'Person',\n 'Science',\n 'Structure',\n 'Text',\n 'Transport',\n 'UnitSystem',\n 'Cryptographic',\n\n # Has all:\n 'Generic',\n\n # Meta:\n '__version__',\n '__title__',\n '__description__',\n '__url__',\n '__author__',\n '__author_email__',\n '__license__',\n]\n\n__version__ = '4.1.0'\n__title__ = 'mimesis'\n__description__ = 'Mimesis: fake data generator.'\n__url__ = 'https://github.com/lk-geimfari/mimesis'\n__author__ = 'Isaak Uchakaev (Likid Geimfari)'\n__author_email__ = '[email protected]'\n__license__ = 'MIT License'\n", "path": "mimesis/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Cryptographic data provider.\"\"\"\n\nimport hashlib\nimport secrets\nfrom typing import Optional, Union\nfrom uuid import UUID\n\nfrom mimesis.enums import Algorithm\nfrom mimesis.providers.base import BaseProvider\nfrom mimesis.providers.text import Text\n\n__all__ = ['Cryptographic']\n\n\nclass Cryptographic(BaseProvider):\n \"\"\"Class that provides cryptographic data.\"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Initialize attributes.\n\n :param seed: Seed.\n \"\"\"\n super().__init__(*args, **kwargs)\n self.__words = Text('en')._data.get('words', {})\n\n class Meta:\n \"\"\"Class for metadata.\"\"\"\n\n name = 'cryptographic'\n\n def uuid(self, version: int = None,\n as_object: bool = False) -> Union[UUID, str]:\n \"\"\"Generate random UUID.\n\n This method returns string by default,\n but yoy can make it return uuid.UUID object using\n parameter **as_object**\n\n :param as_object: Returns uuid.UUID object instead of string.\n :param version: UUID version.\n :return: UUID.\n \"\"\"\n bits = self.random.getrandbits(128)\n uuid_obj = UUID(int=bits, version=version)\n\n if not as_object:\n return str(uuid_obj)\n\n return uuid_obj\n\n def hash(self, algorithm: Algorithm = None) -> str: # noqa: A003\n \"\"\"Generate random hash.\n\n To change hashing algorithm, pass parameter ``algorithm``\n with needed value of the enum object :class:`~mimesis.enums.Algorithm`\n\n :param algorithm: Enum object :class:`~mimesis.enums.Algorithm`.\n :return: Hash.\n :raises NonEnumerableError: When algorithm is unsupported.\n \"\"\"\n key = self._validate_enum(algorithm, Algorithm)\n\n if hasattr(hashlib, key):\n fn = getattr(hashlib, key)\n return fn(self.uuid().encode()).hexdigest()\n\n @staticmethod\n def token_bytes(entropy: int = 32) -> bytes:\n \"\"\"Generate byte string containing ``entropy`` bytes.\n\n The string has ``entropy`` random bytes, each byte\n converted to two hex digits.\n\n .. warning:: Seed is not applicable to this method,\n because of its cryptographic-safe nature.\n\n :param entropy: Number of bytes (default: 32).\n :return: Random bytes.\n \"\"\"\n return secrets.token_bytes(entropy)\n\n @staticmethod\n def token_hex(entropy: int = 32) -> str:\n \"\"\"Return a random text string, in hexadecimal.\n\n The string has *entropy* random bytes, each byte converted to two\n hex digits. If *entropy* is ``None`` or not supplied, a reasonable\n default is used.\n\n .. warning:: Seed is not applicable to this method,\n because of its cryptographic-safe nature.\n\n :param entropy: Number of bytes (default: 32).\n :return: Token.\n \"\"\"\n return secrets.token_hex(entropy)\n\n @staticmethod\n def token_urlsafe(entropy: int = 32):\n \"\"\"Return a random URL-safe text string, in Base64 encoding.\n\n The string has *entropy* random bytes. If *entropy* is ``None``\n or not supplied, a reasonable default is used.\n\n .. warning:: Seed is not applicable to this method,\n because of its cryptographic-safe nature.\n\n :param entropy: Number of bytes (default: 32).\n :return: URL-safe token.\n \"\"\"\n return secrets.token_urlsafe(entropy)\n\n def mnemonic_phrase(self, length: int = 12,\n separator: Optional[str] = None) -> str:\n \"\"\"Generate pseudo mnemonic phrase.\n\n Please, keep in mind that this method generates\n crypto-insecure values.\n\n :param separator: Separator of phrases (Default is \" \").\n :param length: Number of words.\n :return: Mnemonic phrase.\n \"\"\"\n if not separator:\n separator = ' '\n\n words = self.__words['normal']\n words_generator = (self.random.choice(words) for _ in range(length))\n return '{}'.format(separator).join(words_generator)\n", "path": "mimesis/providers/cryptographic.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Mimesis is a Python library, which helps generate fake data.\n\nCopyright (c) 2016 - 2020 Isaak Uchakaev (Likid Geimfari)\nRepository: https://github.com/lk-geimfari/mimesis\nEmail: <[email protected]>\n\"\"\"\n\nfrom mimesis.providers import (\n Address,\n BaseDataProvider,\n BaseProvider,\n Business,\n Choice,\n Clothing,\n Code,\n Cryptographic,\n Datetime,\n Development,\n File,\n Food,\n Generic,\n Hardware,\n Internet,\n Numbers,\n Path,\n Payment,\n Person,\n Science,\n Structure,\n Text,\n Transport,\n UnitSystem,\n)\n\n__all__ = [\n 'Address',\n 'BaseDataProvider',\n 'BaseProvider',\n 'Business',\n 'Clothing',\n 'Code',\n 'Choice',\n 'Datetime',\n 'Development',\n 'File',\n 'Food',\n 'Hardware',\n 'Internet',\n 'Numbers',\n 'Path',\n 'Payment',\n 'Person',\n 'Science',\n 'Structure',\n 'Text',\n 'Transport',\n 'UnitSystem',\n 'Cryptographic',\n\n # Has all:\n 'Generic',\n\n # Meta:\n '__version__',\n '__title__',\n '__description__',\n '__url__',\n '__author__',\n '__author_email__',\n '__license__',\n]\n\n__version__ = '4.1.1'\n__title__ = 'mimesis'\n__description__ = 'Mimesis: fake data generator.'\n__url__ = 'https://github.com/lk-geimfari/mimesis'\n__author__ = 'Isaak Uchakaev (Likid Geimfari)'\n__author_email__ = '[email protected]'\n__license__ = 'MIT License'\n", "path": "mimesis/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Cryptographic data provider.\"\"\"\n\nimport hashlib\nimport secrets\nfrom typing import Optional, Union\nfrom uuid import UUID, uuid4\n\nfrom mimesis.enums import Algorithm\nfrom mimesis.providers.base import BaseProvider\nfrom mimesis.providers.text import Text\n\n__all__ = ['Cryptographic']\n\n\nclass Cryptographic(BaseProvider):\n \"\"\"Class that provides cryptographic data.\"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Initialize attributes.\n\n :param seed: Seed.\n \"\"\"\n super().__init__(*args, **kwargs)\n self.__words = Text('en')._data.get('words', {})\n\n class Meta:\n \"\"\"Class for metadata.\"\"\"\n\n name = 'cryptographic'\n\n @staticmethod\n def uuid(as_object: bool = False) -> Union[UUID, str]:\n \"\"\"Generate random UUID4.\n\n This method returns string by default,\n but you can make it return uuid.UUID object using\n parameter **as_object**\n\n :param as_object: Returns uuid.UUID.\n :return: UUID.\n \"\"\"\n _uuid = uuid4()\n\n if not as_object:\n return str(_uuid)\n\n return _uuid\n\n def hash(self, algorithm: Algorithm = None) -> str: # noqa: A003\n \"\"\"Generate random hash.\n\n To change hashing algorithm, pass parameter ``algorithm``\n with needed value of the enum object :class:`~mimesis.enums.Algorithm`\n\n :param algorithm: Enum object :class:`~mimesis.enums.Algorithm`.\n :return: Hash.\n :raises NonEnumerableError: When algorithm is unsupported.\n \"\"\"\n key = self._validate_enum(algorithm, Algorithm)\n\n if hasattr(hashlib, key):\n fn = getattr(hashlib, key)\n return fn(self.uuid().encode()).hexdigest() # type: ignore\n\n @staticmethod\n def token_bytes(entropy: int = 32) -> bytes:\n \"\"\"Generate byte string containing ``entropy`` bytes.\n\n The string has ``entropy`` random bytes, each byte\n converted to two hex digits.\n\n .. warning:: Seed is not applicable to this method,\n because of its cryptographic-safe nature.\n\n :param entropy: Number of bytes (default: 32).\n :return: Random bytes.\n \"\"\"\n return secrets.token_bytes(entropy)\n\n @staticmethod\n def token_hex(entropy: int = 32) -> str:\n \"\"\"Return a random text string, in hexadecimal.\n\n The string has *entropy* random bytes, each byte converted to two\n hex digits. If *entropy* is ``None`` or not supplied, a reasonable\n default is used.\n\n .. warning:: Seed is not applicable to this method,\n because of its cryptographic-safe nature.\n\n :param entropy: Number of bytes (default: 32).\n :return: Token.\n \"\"\"\n return secrets.token_hex(entropy)\n\n @staticmethod\n def token_urlsafe(entropy: int = 32):\n \"\"\"Return a random URL-safe text string, in Base64 encoding.\n\n The string has *entropy* random bytes. If *entropy* is ``None``\n or not supplied, a reasonable default is used.\n\n .. warning:: Seed is not applicable to this method,\n because of its cryptographic-safe nature.\n\n :param entropy: Number of bytes (default: 32).\n :return: URL-safe token.\n \"\"\"\n return secrets.token_urlsafe(entropy)\n\n def mnemonic_phrase(self, length: int = 12,\n separator: Optional[str] = None) -> str:\n \"\"\"Generate pseudo mnemonic phrase.\n\n Please, keep in mind that this method generates\n crypto-insecure values.\n\n :param separator: Separator of phrases (Default is \" \").\n :param length: Number of words.\n :return: Mnemonic phrase.\n \"\"\"\n if not separator:\n separator = ' '\n\n words = self.__words['normal']\n words_generator = (self.random.choice(words) for _ in range(length))\n return '{}'.format(separator).join(words_generator)\n", "path": "mimesis/providers/cryptographic.py"}]}
2,091
576
gh_patches_debug_506
rasdani/github-patches
git_diff
liqd__a4-opin-765
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- add test for email sending after moderator was added to project --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `euth/projects/emails.py` Content: ``` 1 from euth.contrib.emails import OpinEmail 2 3 4 class ModeratorAddedEmail(OpinEmail): 5 template_name = 'notify_new_moderator' 6 7 def get_recipients(self): 8 self.kwargs['user'] 9 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/euth/projects/emails.py b/euth/projects/emails.py --- a/euth/projects/emails.py +++ b/euth/projects/emails.py @@ -4,5 +4,5 @@ class ModeratorAddedEmail(OpinEmail): template_name = 'notify_new_moderator' - def get_recipients(self): - self.kwargs['user'] + def get_receivers(self): + return [self.kwargs['user']]
{"golden_diff": "diff --git a/euth/projects/emails.py b/euth/projects/emails.py\n--- a/euth/projects/emails.py\n+++ b/euth/projects/emails.py\n@@ -4,5 +4,5 @@\n class ModeratorAddedEmail(OpinEmail):\n template_name = 'notify_new_moderator'\n \n- def get_recipients(self):\n- self.kwargs['user']\n+ def get_receivers(self):\n+ return [self.kwargs['user']]\n", "issue": "add test for email sending after moderator was added to project\n\n", "before_files": [{"content": "from euth.contrib.emails import OpinEmail\n\n\nclass ModeratorAddedEmail(OpinEmail):\n template_name = 'notify_new_moderator'\n\n def get_recipients(self):\n self.kwargs['user']\n", "path": "euth/projects/emails.py"}], "after_files": [{"content": "from euth.contrib.emails import OpinEmail\n\n\nclass ModeratorAddedEmail(OpinEmail):\n template_name = 'notify_new_moderator'\n\n def get_receivers(self):\n return [self.kwargs['user']]\n", "path": "euth/projects/emails.py"}]}
326
99
gh_patches_debug_3514
rasdani/github-patches
git_diff
encode__uvicorn-296
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Deploying inside a process manager: must account for inherited socket family and type Hi, I get an error when trying to run uvicorn with Circus, but only if using a socket with file descriptor (which I'd like to do to increase the number of processes). circus.ini: ``` [socket:web] host = 0.0.0.0 port = 80 [watcher:uvicorn] cmd = uvicorn # args = --host 0.0.0.0 --port 80 satemo.asgi:application # --> works ok args = --fd $(circus.sockets.web) satemo.asgi:application # --> Fails use_sockets = True numprocesses = 1 ``` Error: ``` File "c:\nss\venv\channels2\lib\site-packages\uvicorn\main.py", line 247, in startup sock = socket.fromfd(config.fd, socket.AF_UNIX, socket.SOCK_STREAM) UnboundLocalError: local variable 'socket' referenced before assignment ``` Environment: * Windows 10 * Python 3.6.5 * uvicorn 0.4.1 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `uvicorn/main.py` Content: ``` 1 import asyncio 2 import functools 3 import os 4 import signal 5 import sys 6 import time 7 from email.utils import formatdate 8 9 import click 10 11 from uvicorn.config import ( 12 HTTP_PROTOCOLS, 13 LIFESPAN, 14 LOG_LEVELS, 15 LOOP_SETUPS, 16 WS_PROTOCOLS, 17 Config, 18 get_logger, 19 ) 20 from uvicorn.reloaders.statreload import StatReload 21 22 LEVEL_CHOICES = click.Choice(LOG_LEVELS.keys()) 23 HTTP_CHOICES = click.Choice(HTTP_PROTOCOLS.keys()) 24 WS_CHOICES = click.Choice(WS_PROTOCOLS.keys()) 25 LIFESPAN_CHOICES = click.Choice(LIFESPAN.keys()) 26 LOOP_CHOICES = click.Choice(LOOP_SETUPS.keys()) 27 28 HANDLED_SIGNALS = ( 29 signal.SIGINT, # Unix signal 2. Sent by Ctrl+C. 30 signal.SIGTERM, # Unix signal 15. Sent by `kill <pid>`. 31 ) 32 33 34 @click.command() 35 @click.argument("app") 36 @click.option( 37 "--host", 38 type=str, 39 default="127.0.0.1", 40 help="Bind socket to this host.", 41 show_default=True, 42 ) 43 @click.option( 44 "--port", 45 type=int, 46 default=8000, 47 help="Bind socket to this port.", 48 show_default=True, 49 ) 50 @click.option("--uds", type=str, default=None, help="Bind to a UNIX domain socket.") 51 @click.option( 52 "--fd", type=int, default=None, help="Bind to socket from this file descriptor." 53 ) 54 @click.option( 55 "--loop", 56 type=LOOP_CHOICES, 57 default="auto", 58 help="Event loop implementation.", 59 show_default=True, 60 ) 61 @click.option( 62 "--http", 63 type=HTTP_CHOICES, 64 default="auto", 65 help="HTTP protocol implementation.", 66 show_default=True, 67 ) 68 @click.option( 69 "--ws", 70 type=WS_CHOICES, 71 default="auto", 72 help="WebSocket protocol implementation.", 73 show_default=True, 74 ) 75 @click.option( 76 "--lifespan", 77 type=LIFESPAN_CHOICES, 78 default="auto", 79 help="Lifespan implementation.", 80 show_default=True, 81 ) 82 @click.option( 83 "--wsgi", 84 is_flag=True, 85 default=False, 86 help="Use WSGI as the application interface, instead of ASGI.", 87 ) 88 @click.option("--debug", is_flag=True, default=False, help="Enable debug mode.") 89 @click.option( 90 "--log-level", 91 type=LEVEL_CHOICES, 92 default="info", 93 help="Log level.", 94 show_default=True, 95 ) 96 @click.option( 97 "--no-access-log", is_flag=True, default=False, help="Disable access log." 98 ) 99 @click.option( 100 "--proxy-headers", 101 is_flag=True, 102 default=False, 103 help="Use X-Forwarded-Proto, X-Forwarded-For, X-Forwarded-Port to populate remote address info.", 104 ) 105 @click.option( 106 "--root-path", 107 type=str, 108 default="", 109 help="Set the ASGI 'root_path' for applications submounted below a given URL path.", 110 ) 111 @click.option( 112 "--limit-concurrency", 113 type=int, 114 default=None, 115 help="Maximum number of concurrent connections or tasks to allow, before issuing HTTP 503 responses.", 116 ) 117 @click.option( 118 "--limit-max-requests", 119 type=int, 120 default=None, 121 help="Maximum number of requests to service before terminating the process.", 122 ) 123 @click.option( 124 "--timeout-keep-alive", 125 type=int, 126 default=5, 127 help="Close Keep-Alive connections if no new data is received within this timeout.", 128 show_default=True, 129 ) 130 def main( 131 app, 132 host: str, 133 port: int, 134 uds: str, 135 fd: int, 136 loop: str, 137 http: str, 138 ws: str, 139 lifespan: str, 140 wsgi: bool, 141 debug: bool, 142 log_level: str, 143 no_access_log: bool, 144 proxy_headers: bool, 145 root_path: str, 146 limit_concurrency: int, 147 limit_max_requests: int, 148 timeout_keep_alive: int, 149 ): 150 sys.path.insert(0, ".") 151 152 kwargs = { 153 "app": app, 154 "host": host, 155 "port": port, 156 "uds": uds, 157 "fd": fd, 158 "loop": loop, 159 "http": http, 160 "ws": ws, 161 "lifespan": lifespan, 162 "log_level": log_level, 163 "access_log": not no_access_log, 164 "wsgi": wsgi, 165 "debug": debug, 166 "proxy_headers": proxy_headers, 167 "root_path": root_path, 168 "limit_concurrency": limit_concurrency, 169 "limit_max_requests": limit_max_requests, 170 "timeout_keep_alive": timeout_keep_alive, 171 } 172 173 if debug: 174 logger = get_logger(log_level) 175 reloader = StatReload(logger) 176 reloader.run(run, kwargs) 177 else: 178 run(**kwargs) 179 180 181 def run(app, **kwargs): 182 config = Config(app, **kwargs) 183 server = Server(config=config) 184 server.run() 185 186 187 class ServerState: 188 """ 189 Shared servers state that is available between all protocol instances. 190 """ 191 192 def __init__(self): 193 self.total_requests = 0 194 self.connections = set() 195 self.tasks = set() 196 self.default_headers = [] 197 198 199 class Server: 200 def __init__(self, config): 201 self.config = config 202 self.server_state = ServerState() 203 204 self.started = False 205 self.should_exit = False 206 self.force_exit = False 207 208 def run(self): 209 process_id = os.getpid() 210 211 config = self.config 212 if not config.loaded: 213 config.load() 214 215 self.loop = config.loop_instance 216 self.logger = config.logger_instance 217 self.lifespan = config.lifespan_class(config) 218 219 self.install_signal_handlers() 220 221 self.logger.info("Started server process [{}]".format(process_id)) 222 self.loop.run_until_complete(self.startup()) 223 self.loop.run_until_complete(self.main_loop()) 224 self.loop.run_until_complete(self.shutdown()) 225 self.loop.stop() 226 self.logger.info("Finished server process [{}]".format(process_id)) 227 228 async def startup(self): 229 config = self.config 230 231 await self.lifespan.startup() 232 233 create_protocol = functools.partial( 234 config.http_protocol_class, config=config, server_state=self.server_state 235 ) 236 237 if config.sockets is not None: 238 # Explicitly passed a list of open sockets. 239 # We use this when the server is run from a Gunicorn worker. 240 self.servers = [] 241 for socket in config.sockets: 242 server = await self.loop.create_server(create_protocol, sock=socket) 243 self.servers.append(server) 244 245 elif config.fd is not None: 246 # Use an existing socket, from a file descriptor. 247 sock = socket.fromfd(config.fd, socket.AF_UNIX, socket.SOCK_STREAM) 248 server = await self.loop.create_server(create_protocol, sock=sock) 249 message = "Uvicorn running on socket %s (Press CTRL+C to quit)" 250 self.logger.info(message % str(sock.getsockname())) 251 self.servers = [server] 252 253 elif config.uds is not None: 254 # Create a socket using UNIX domain socket. 255 server = await self.loop.create_unix_server( 256 create_protocol, path=config.uds 257 ) 258 message = "Uvicorn running on unix socket %s (Press CTRL+C to quit)" 259 self.logger.info(message % config.uds) 260 self.servers = [server] 261 262 else: 263 # Standard case. Create a socket from a host/port pair. 264 server = await self.loop.create_server( 265 create_protocol, host=config.host, port=config.port 266 ) 267 message = "Uvicorn running on http://%s:%d (Press CTRL+C to quit)" 268 self.logger.info(message % (config.host, config.port)) 269 self.servers = [server] 270 271 self.started = True 272 273 async def main_loop(self): 274 counter = 0 275 should_exit = await self.on_tick(counter) 276 while not should_exit: 277 counter += 1 278 counter = counter % 864000 279 await asyncio.sleep(0.1) 280 should_exit = await self.on_tick(counter) 281 282 async def on_tick(self, counter) -> bool: 283 # Update the default headers, once per second. 284 if counter % 10 == 0: 285 current_time = time.time() 286 current_date = formatdate(current_time, usegmt=True).encode() 287 self.server_state.default_headers = [ 288 (b"server", b"uvicorn"), 289 (b"date", current_date), 290 ] 291 292 # Callback to `callback_notify` once every `timeout_notify` seconds. 293 if self.config.callback_notify is not None: 294 if counter % (10 * self.config.timeout_notify) == 0: 295 await self.config.callback_notify() 296 297 # Determine if we should exit. 298 if self.should_exit: 299 return True 300 if self.config.limit_max_requests is not None: 301 return self.server_state.total_requests >= self.config.limit_max_requests 302 return False 303 304 async def shutdown(self): 305 self.logger.info("Shutting down") 306 307 # Stop accepting new connections. 308 if not self.config.sockets: 309 for server in self.servers: 310 server.close() 311 for server in self.servers: 312 await server.wait_closed() 313 314 # Request shutdown on all existing connections. 315 for connection in list(self.server_state.connections): 316 connection.shutdown() 317 await asyncio.sleep(0.1) 318 319 # Wait for existing connections to finish sending responses. 320 if self.server_state.connections and not self.force_exit: 321 msg = "Waiting for connections to close. (CTRL+C to force quit)" 322 self.logger.info(msg) 323 while self.server_state.connections and not self.force_exit: 324 await asyncio.sleep(0.1) 325 326 # Wait for existing tasks to complete. 327 if self.server_state.tasks and not self.force_exit: 328 msg = "Waiting for background tasks to complete. (CTRL+C to force quit)" 329 self.logger.info(msg) 330 while self.server_state.tasks and not self.force_exit: 331 await asyncio.sleep(0.1) 332 333 # Send the lifespan shutdown event, and wait for application shutdown. 334 if not self.force_exit: 335 await self.lifespan.shutdown() 336 337 def install_signal_handlers(self): 338 try: 339 for sig in HANDLED_SIGNALS: 340 self.loop.add_signal_handler(sig, self.handle_exit, sig, None) 341 except NotImplementedError as exc: 342 # Windows 343 for sig in HANDLED_SIGNALS: 344 signal.signal(sig, self.handle_exit) 345 346 def handle_exit(self, sig, frame): 347 if self.should_exit: 348 self.force_exit = True 349 else: 350 self.should_exit = True 351 352 353 if __name__ == "__main__": 354 main() 355 ``` Path: `uvicorn/__init__.py` Content: ``` 1 from uvicorn.main import main, run 2 3 __version__ = "0.4.1" 4 __all__ = ["main", "run"] 5 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/uvicorn/__init__.py b/uvicorn/__init__.py --- a/uvicorn/__init__.py +++ b/uvicorn/__init__.py @@ -1,4 +1,4 @@ from uvicorn.main import main, run -__version__ = "0.4.1" +__version__ = "0.4.2" __all__ = ["main", "run"] diff --git a/uvicorn/main.py b/uvicorn/main.py --- a/uvicorn/main.py +++ b/uvicorn/main.py @@ -2,6 +2,7 @@ import functools import os import signal +import socket import sys import time from email.utils import formatdate
{"golden_diff": "diff --git a/uvicorn/__init__.py b/uvicorn/__init__.py\n--- a/uvicorn/__init__.py\n+++ b/uvicorn/__init__.py\n@@ -1,4 +1,4 @@\n from uvicorn.main import main, run\n \n-__version__ = \"0.4.1\"\n+__version__ = \"0.4.2\"\n __all__ = [\"main\", \"run\"]\ndiff --git a/uvicorn/main.py b/uvicorn/main.py\n--- a/uvicorn/main.py\n+++ b/uvicorn/main.py\n@@ -2,6 +2,7 @@\n import functools\n import os\n import signal\n+import socket\n import sys\n import time\n from email.utils import formatdate\n", "issue": "Deploying inside a process manager: must account for inherited socket family and type\nHi,\r\n\r\nI get an error when trying to run uvicorn with Circus, but only if using a socket with file descriptor (which I'd like to do to increase the number of processes).\r\n\r\ncircus.ini:\r\n```\r\n[socket:web]\r\nhost = 0.0.0.0\r\nport = 80\r\n[watcher:uvicorn]\r\ncmd = uvicorn\r\n# args = --host 0.0.0.0 --port 80 satemo.asgi:application \r\n# --> works ok\r\nargs = --fd $(circus.sockets.web) satemo.asgi:application \r\n# --> Fails\r\nuse_sockets = True\r\nnumprocesses = 1\r\n```\r\n\r\nError:\r\n```\r\nFile \"c:\\nss\\venv\\channels2\\lib\\site-packages\\uvicorn\\main.py\", line 247, in startup\r\n sock = socket.fromfd(config.fd, socket.AF_UNIX, socket.SOCK_STREAM)\r\nUnboundLocalError: local variable 'socket' referenced before assignment\r\n```\r\n\r\nEnvironment:\r\n* Windows 10\r\n* Python 3.6.5\r\n* uvicorn 0.4.1\r\n\n", "before_files": [{"content": "import asyncio\nimport functools\nimport os\nimport signal\nimport sys\nimport time\nfrom email.utils import formatdate\n\nimport click\n\nfrom uvicorn.config import (\n HTTP_PROTOCOLS,\n LIFESPAN,\n LOG_LEVELS,\n LOOP_SETUPS,\n WS_PROTOCOLS,\n Config,\n get_logger,\n)\nfrom uvicorn.reloaders.statreload import StatReload\n\nLEVEL_CHOICES = click.Choice(LOG_LEVELS.keys())\nHTTP_CHOICES = click.Choice(HTTP_PROTOCOLS.keys())\nWS_CHOICES = click.Choice(WS_PROTOCOLS.keys())\nLIFESPAN_CHOICES = click.Choice(LIFESPAN.keys())\nLOOP_CHOICES = click.Choice(LOOP_SETUPS.keys())\n\nHANDLED_SIGNALS = (\n signal.SIGINT, # Unix signal 2. Sent by Ctrl+C.\n signal.SIGTERM, # Unix signal 15. Sent by `kill <pid>`.\n)\n\n\[email protected]()\[email protected](\"app\")\[email protected](\n \"--host\",\n type=str,\n default=\"127.0.0.1\",\n help=\"Bind socket to this host.\",\n show_default=True,\n)\[email protected](\n \"--port\",\n type=int,\n default=8000,\n help=\"Bind socket to this port.\",\n show_default=True,\n)\[email protected](\"--uds\", type=str, default=None, help=\"Bind to a UNIX domain socket.\")\[email protected](\n \"--fd\", type=int, default=None, help=\"Bind to socket from this file descriptor.\"\n)\[email protected](\n \"--loop\",\n type=LOOP_CHOICES,\n default=\"auto\",\n help=\"Event loop implementation.\",\n show_default=True,\n)\[email protected](\n \"--http\",\n type=HTTP_CHOICES,\n default=\"auto\",\n help=\"HTTP protocol implementation.\",\n show_default=True,\n)\[email protected](\n \"--ws\",\n type=WS_CHOICES,\n default=\"auto\",\n help=\"WebSocket protocol implementation.\",\n show_default=True,\n)\[email protected](\n \"--lifespan\",\n type=LIFESPAN_CHOICES,\n default=\"auto\",\n help=\"Lifespan implementation.\",\n show_default=True,\n)\[email protected](\n \"--wsgi\",\n is_flag=True,\n default=False,\n help=\"Use WSGI as the application interface, instead of ASGI.\",\n)\[email protected](\"--debug\", is_flag=True, default=False, help=\"Enable debug mode.\")\[email protected](\n \"--log-level\",\n type=LEVEL_CHOICES,\n default=\"info\",\n help=\"Log level.\",\n show_default=True,\n)\[email protected](\n \"--no-access-log\", is_flag=True, default=False, help=\"Disable access log.\"\n)\[email protected](\n \"--proxy-headers\",\n is_flag=True,\n default=False,\n help=\"Use X-Forwarded-Proto, X-Forwarded-For, X-Forwarded-Port to populate remote address info.\",\n)\[email protected](\n \"--root-path\",\n type=str,\n default=\"\",\n help=\"Set the ASGI 'root_path' for applications submounted below a given URL path.\",\n)\[email protected](\n \"--limit-concurrency\",\n type=int,\n default=None,\n help=\"Maximum number of concurrent connections or tasks to allow, before issuing HTTP 503 responses.\",\n)\[email protected](\n \"--limit-max-requests\",\n type=int,\n default=None,\n help=\"Maximum number of requests to service before terminating the process.\",\n)\[email protected](\n \"--timeout-keep-alive\",\n type=int,\n default=5,\n help=\"Close Keep-Alive connections if no new data is received within this timeout.\",\n show_default=True,\n)\ndef main(\n app,\n host: str,\n port: int,\n uds: str,\n fd: int,\n loop: str,\n http: str,\n ws: str,\n lifespan: str,\n wsgi: bool,\n debug: bool,\n log_level: str,\n no_access_log: bool,\n proxy_headers: bool,\n root_path: str,\n limit_concurrency: int,\n limit_max_requests: int,\n timeout_keep_alive: int,\n):\n sys.path.insert(0, \".\")\n\n kwargs = {\n \"app\": app,\n \"host\": host,\n \"port\": port,\n \"uds\": uds,\n \"fd\": fd,\n \"loop\": loop,\n \"http\": http,\n \"ws\": ws,\n \"lifespan\": lifespan,\n \"log_level\": log_level,\n \"access_log\": not no_access_log,\n \"wsgi\": wsgi,\n \"debug\": debug,\n \"proxy_headers\": proxy_headers,\n \"root_path\": root_path,\n \"limit_concurrency\": limit_concurrency,\n \"limit_max_requests\": limit_max_requests,\n \"timeout_keep_alive\": timeout_keep_alive,\n }\n\n if debug:\n logger = get_logger(log_level)\n reloader = StatReload(logger)\n reloader.run(run, kwargs)\n else:\n run(**kwargs)\n\n\ndef run(app, **kwargs):\n config = Config(app, **kwargs)\n server = Server(config=config)\n server.run()\n\n\nclass ServerState:\n \"\"\"\n Shared servers state that is available between all protocol instances.\n \"\"\"\n\n def __init__(self):\n self.total_requests = 0\n self.connections = set()\n self.tasks = set()\n self.default_headers = []\n\n\nclass Server:\n def __init__(self, config):\n self.config = config\n self.server_state = ServerState()\n\n self.started = False\n self.should_exit = False\n self.force_exit = False\n\n def run(self):\n process_id = os.getpid()\n\n config = self.config\n if not config.loaded:\n config.load()\n\n self.loop = config.loop_instance\n self.logger = config.logger_instance\n self.lifespan = config.lifespan_class(config)\n\n self.install_signal_handlers()\n\n self.logger.info(\"Started server process [{}]\".format(process_id))\n self.loop.run_until_complete(self.startup())\n self.loop.run_until_complete(self.main_loop())\n self.loop.run_until_complete(self.shutdown())\n self.loop.stop()\n self.logger.info(\"Finished server process [{}]\".format(process_id))\n\n async def startup(self):\n config = self.config\n\n await self.lifespan.startup()\n\n create_protocol = functools.partial(\n config.http_protocol_class, config=config, server_state=self.server_state\n )\n\n if config.sockets is not None:\n # Explicitly passed a list of open sockets.\n # We use this when the server is run from a Gunicorn worker.\n self.servers = []\n for socket in config.sockets:\n server = await self.loop.create_server(create_protocol, sock=socket)\n self.servers.append(server)\n\n elif config.fd is not None:\n # Use an existing socket, from a file descriptor.\n sock = socket.fromfd(config.fd, socket.AF_UNIX, socket.SOCK_STREAM)\n server = await self.loop.create_server(create_protocol, sock=sock)\n message = \"Uvicorn running on socket %s (Press CTRL+C to quit)\"\n self.logger.info(message % str(sock.getsockname()))\n self.servers = [server]\n\n elif config.uds is not None:\n # Create a socket using UNIX domain socket.\n server = await self.loop.create_unix_server(\n create_protocol, path=config.uds\n )\n message = \"Uvicorn running on unix socket %s (Press CTRL+C to quit)\"\n self.logger.info(message % config.uds)\n self.servers = [server]\n\n else:\n # Standard case. Create a socket from a host/port pair.\n server = await self.loop.create_server(\n create_protocol, host=config.host, port=config.port\n )\n message = \"Uvicorn running on http://%s:%d (Press CTRL+C to quit)\"\n self.logger.info(message % (config.host, config.port))\n self.servers = [server]\n\n self.started = True\n\n async def main_loop(self):\n counter = 0\n should_exit = await self.on_tick(counter)\n while not should_exit:\n counter += 1\n counter = counter % 864000\n await asyncio.sleep(0.1)\n should_exit = await self.on_tick(counter)\n\n async def on_tick(self, counter) -> bool:\n # Update the default headers, once per second.\n if counter % 10 == 0:\n current_time = time.time()\n current_date = formatdate(current_time, usegmt=True).encode()\n self.server_state.default_headers = [\n (b\"server\", b\"uvicorn\"),\n (b\"date\", current_date),\n ]\n\n # Callback to `callback_notify` once every `timeout_notify` seconds.\n if self.config.callback_notify is not None:\n if counter % (10 * self.config.timeout_notify) == 0:\n await self.config.callback_notify()\n\n # Determine if we should exit.\n if self.should_exit:\n return True\n if self.config.limit_max_requests is not None:\n return self.server_state.total_requests >= self.config.limit_max_requests\n return False\n\n async def shutdown(self):\n self.logger.info(\"Shutting down\")\n\n # Stop accepting new connections.\n if not self.config.sockets:\n for server in self.servers:\n server.close()\n for server in self.servers:\n await server.wait_closed()\n\n # Request shutdown on all existing connections.\n for connection in list(self.server_state.connections):\n connection.shutdown()\n await asyncio.sleep(0.1)\n\n # Wait for existing connections to finish sending responses.\n if self.server_state.connections and not self.force_exit:\n msg = \"Waiting for connections to close. (CTRL+C to force quit)\"\n self.logger.info(msg)\n while self.server_state.connections and not self.force_exit:\n await asyncio.sleep(0.1)\n\n # Wait for existing tasks to complete.\n if self.server_state.tasks and not self.force_exit:\n msg = \"Waiting for background tasks to complete. (CTRL+C to force quit)\"\n self.logger.info(msg)\n while self.server_state.tasks and not self.force_exit:\n await asyncio.sleep(0.1)\n\n # Send the lifespan shutdown event, and wait for application shutdown.\n if not self.force_exit:\n await self.lifespan.shutdown()\n\n def install_signal_handlers(self):\n try:\n for sig in HANDLED_SIGNALS:\n self.loop.add_signal_handler(sig, self.handle_exit, sig, None)\n except NotImplementedError as exc:\n # Windows\n for sig in HANDLED_SIGNALS:\n signal.signal(sig, self.handle_exit)\n\n def handle_exit(self, sig, frame):\n if self.should_exit:\n self.force_exit = True\n else:\n self.should_exit = True\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "uvicorn/main.py"}, {"content": "from uvicorn.main import main, run\n\n__version__ = \"0.4.1\"\n__all__ = [\"main\", \"run\"]\n", "path": "uvicorn/__init__.py"}], "after_files": [{"content": "import asyncio\nimport functools\nimport os\nimport signal\nimport socket\nimport sys\nimport time\nfrom email.utils import formatdate\n\nimport click\n\nfrom uvicorn.config import (\n HTTP_PROTOCOLS,\n LIFESPAN,\n LOG_LEVELS,\n LOOP_SETUPS,\n WS_PROTOCOLS,\n Config,\n get_logger,\n)\nfrom uvicorn.reloaders.statreload import StatReload\n\nLEVEL_CHOICES = click.Choice(LOG_LEVELS.keys())\nHTTP_CHOICES = click.Choice(HTTP_PROTOCOLS.keys())\nWS_CHOICES = click.Choice(WS_PROTOCOLS.keys())\nLIFESPAN_CHOICES = click.Choice(LIFESPAN.keys())\nLOOP_CHOICES = click.Choice(LOOP_SETUPS.keys())\n\nHANDLED_SIGNALS = (\n signal.SIGINT, # Unix signal 2. Sent by Ctrl+C.\n signal.SIGTERM, # Unix signal 15. Sent by `kill <pid>`.\n)\n\n\[email protected]()\[email protected](\"app\")\[email protected](\n \"--host\",\n type=str,\n default=\"127.0.0.1\",\n help=\"Bind socket to this host.\",\n show_default=True,\n)\[email protected](\n \"--port\",\n type=int,\n default=8000,\n help=\"Bind socket to this port.\",\n show_default=True,\n)\[email protected](\"--uds\", type=str, default=None, help=\"Bind to a UNIX domain socket.\")\[email protected](\n \"--fd\", type=int, default=None, help=\"Bind to socket from this file descriptor.\"\n)\[email protected](\n \"--loop\",\n type=LOOP_CHOICES,\n default=\"auto\",\n help=\"Event loop implementation.\",\n show_default=True,\n)\[email protected](\n \"--http\",\n type=HTTP_CHOICES,\n default=\"auto\",\n help=\"HTTP protocol implementation.\",\n show_default=True,\n)\[email protected](\n \"--ws\",\n type=WS_CHOICES,\n default=\"auto\",\n help=\"WebSocket protocol implementation.\",\n show_default=True,\n)\[email protected](\n \"--lifespan\",\n type=LIFESPAN_CHOICES,\n default=\"auto\",\n help=\"Lifespan implementation.\",\n show_default=True,\n)\[email protected](\n \"--wsgi\",\n is_flag=True,\n default=False,\n help=\"Use WSGI as the application interface, instead of ASGI.\",\n)\[email protected](\"--debug\", is_flag=True, default=False, help=\"Enable debug mode.\")\[email protected](\n \"--log-level\",\n type=LEVEL_CHOICES,\n default=\"info\",\n help=\"Log level.\",\n show_default=True,\n)\[email protected](\n \"--no-access-log\", is_flag=True, default=False, help=\"Disable access log.\"\n)\[email protected](\n \"--proxy-headers\",\n is_flag=True,\n default=False,\n help=\"Use X-Forwarded-Proto, X-Forwarded-For, X-Forwarded-Port to populate remote address info.\",\n)\[email protected](\n \"--root-path\",\n type=str,\n default=\"\",\n help=\"Set the ASGI 'root_path' for applications submounted below a given URL path.\",\n)\[email protected](\n \"--limit-concurrency\",\n type=int,\n default=None,\n help=\"Maximum number of concurrent connections or tasks to allow, before issuing HTTP 503 responses.\",\n)\[email protected](\n \"--limit-max-requests\",\n type=int,\n default=None,\n help=\"Maximum number of requests to service before terminating the process.\",\n)\[email protected](\n \"--timeout-keep-alive\",\n type=int,\n default=5,\n help=\"Close Keep-Alive connections if no new data is received within this timeout.\",\n show_default=True,\n)\ndef main(\n app,\n host: str,\n port: int,\n uds: str,\n fd: int,\n loop: str,\n http: str,\n ws: str,\n lifespan: str,\n wsgi: bool,\n debug: bool,\n log_level: str,\n no_access_log: bool,\n proxy_headers: bool,\n root_path: str,\n limit_concurrency: int,\n limit_max_requests: int,\n timeout_keep_alive: int,\n):\n sys.path.insert(0, \".\")\n\n kwargs = {\n \"app\": app,\n \"host\": host,\n \"port\": port,\n \"uds\": uds,\n \"fd\": fd,\n \"loop\": loop,\n \"http\": http,\n \"ws\": ws,\n \"lifespan\": lifespan,\n \"log_level\": log_level,\n \"access_log\": not no_access_log,\n \"wsgi\": wsgi,\n \"debug\": debug,\n \"proxy_headers\": proxy_headers,\n \"root_path\": root_path,\n \"limit_concurrency\": limit_concurrency,\n \"limit_max_requests\": limit_max_requests,\n \"timeout_keep_alive\": timeout_keep_alive,\n }\n\n if debug:\n logger = get_logger(log_level)\n reloader = StatReload(logger)\n reloader.run(run, kwargs)\n else:\n run(**kwargs)\n\n\ndef run(app, **kwargs):\n config = Config(app, **kwargs)\n server = Server(config=config)\n server.run()\n\n\nclass ServerState:\n \"\"\"\n Shared servers state that is available between all protocol instances.\n \"\"\"\n\n def __init__(self):\n self.total_requests = 0\n self.connections = set()\n self.tasks = set()\n self.default_headers = []\n\n\nclass Server:\n def __init__(self, config):\n self.config = config\n self.server_state = ServerState()\n\n self.started = False\n self.should_exit = False\n self.force_exit = False\n\n def run(self):\n process_id = os.getpid()\n\n config = self.config\n if not config.loaded:\n config.load()\n\n self.loop = config.loop_instance\n self.logger = config.logger_instance\n self.lifespan = config.lifespan_class(config)\n\n self.install_signal_handlers()\n\n self.logger.info(\"Started server process [{}]\".format(process_id))\n self.loop.run_until_complete(self.startup())\n self.loop.run_until_complete(self.main_loop())\n self.loop.run_until_complete(self.shutdown())\n self.loop.stop()\n self.logger.info(\"Finished server process [{}]\".format(process_id))\n\n async def startup(self):\n config = self.config\n\n await self.lifespan.startup()\n\n create_protocol = functools.partial(\n config.http_protocol_class, config=config, server_state=self.server_state\n )\n\n if config.sockets is not None:\n # Explicitly passed a list of open sockets.\n # We use this when the server is run from a Gunicorn worker.\n self.servers = []\n for socket in config.sockets:\n server = await self.loop.create_server(create_protocol, sock=socket)\n self.servers.append(server)\n\n elif config.fd is not None:\n # Use an existing socket, from a file descriptor.\n sock = socket.fromfd(config.fd, socket.AF_UNIX, socket.SOCK_STREAM)\n server = await self.loop.create_server(create_protocol, sock=sock)\n message = \"Uvicorn running on socket %s (Press CTRL+C to quit)\"\n self.logger.info(message % str(sock.getsockname()))\n self.servers = [server]\n\n elif config.uds is not None:\n # Create a socket using UNIX domain socket.\n server = await self.loop.create_unix_server(\n create_protocol, path=config.uds\n )\n message = \"Uvicorn running on unix socket %s (Press CTRL+C to quit)\"\n self.logger.info(message % config.uds)\n self.servers = [server]\n\n else:\n # Standard case. Create a socket from a host/port pair.\n server = await self.loop.create_server(\n create_protocol, host=config.host, port=config.port\n )\n message = \"Uvicorn running on http://%s:%d (Press CTRL+C to quit)\"\n self.logger.info(message % (config.host, config.port))\n self.servers = [server]\n\n self.started = True\n\n async def main_loop(self):\n counter = 0\n should_exit = await self.on_tick(counter)\n while not should_exit:\n counter += 1\n counter = counter % 864000\n await asyncio.sleep(0.1)\n should_exit = await self.on_tick(counter)\n\n async def on_tick(self, counter) -> bool:\n # Update the default headers, once per second.\n if counter % 10 == 0:\n current_time = time.time()\n current_date = formatdate(current_time, usegmt=True).encode()\n self.server_state.default_headers = [\n (b\"server\", b\"uvicorn\"),\n (b\"date\", current_date),\n ]\n\n # Callback to `callback_notify` once every `timeout_notify` seconds.\n if self.config.callback_notify is not None:\n if counter % (10 * self.config.timeout_notify) == 0:\n await self.config.callback_notify()\n\n # Determine if we should exit.\n if self.should_exit:\n return True\n if self.config.limit_max_requests is not None:\n return self.server_state.total_requests >= self.config.limit_max_requests\n return False\n\n async def shutdown(self):\n self.logger.info(\"Shutting down\")\n\n # Stop accepting new connections.\n if not self.config.sockets:\n for server in self.servers:\n server.close()\n for server in self.servers:\n await server.wait_closed()\n\n # Request shutdown on all existing connections.\n for connection in list(self.server_state.connections):\n connection.shutdown()\n await asyncio.sleep(0.1)\n\n # Wait for existing connections to finish sending responses.\n if self.server_state.connections and not self.force_exit:\n msg = \"Waiting for connections to close. (CTRL+C to force quit)\"\n self.logger.info(msg)\n while self.server_state.connections and not self.force_exit:\n await asyncio.sleep(0.1)\n\n # Wait for existing tasks to complete.\n if self.server_state.tasks and not self.force_exit:\n msg = \"Waiting for background tasks to complete. (CTRL+C to force quit)\"\n self.logger.info(msg)\n while self.server_state.tasks and not self.force_exit:\n await asyncio.sleep(0.1)\n\n # Send the lifespan shutdown event, and wait for application shutdown.\n if not self.force_exit:\n await self.lifespan.shutdown()\n\n def install_signal_handlers(self):\n try:\n for sig in HANDLED_SIGNALS:\n self.loop.add_signal_handler(sig, self.handle_exit, sig, None)\n except NotImplementedError as exc:\n # Windows\n for sig in HANDLED_SIGNALS:\n signal.signal(sig, self.handle_exit)\n\n def handle_exit(self, sig, frame):\n if self.should_exit:\n self.force_exit = True\n else:\n self.should_exit = True\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "uvicorn/main.py"}, {"content": "from uvicorn.main import main, run\n\n__version__ = \"0.4.2\"\n__all__ = [\"main\", \"run\"]\n", "path": "uvicorn/__init__.py"}]}
3,861
161
gh_patches_debug_14938
rasdani/github-patches
git_diff
espnet__espnet-2230
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Could not find a version that satisfies the requirement cupy-cuda102==6.0.0 ``` ERROR: Could not find a version that satisfies the requirement cupy-cuda102==6.0.0 (from versions: 7.2.0, 7.3.0, 7.4.0, 7.5.0, 7.6.0, 8.0.0a1, 8.0.0b1, 8.0.0b2, 8.0.0b3, 8.0.0b4) ``` cupy-cuda102 doesn't have version 6.0.0 , upgrade ```CHAINER_VERSION := 6.0.0``` to ```CHAINER_VERSION := 7.4.0```, please --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tools/check_install.py` Content: ``` 1 #!/usr/bin/env python3 2 3 """Script to check whether the installation is done correctly.""" 4 5 # Copyright 2018 Nagoya University (Tomoki Hayashi) 6 # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) 7 8 import argparse 9 import importlib 10 import logging 11 import sys 12 13 from distutils.version import LooseVersion 14 15 16 # NOTE: add the libraries which are not included in setup.py 17 MANUALLY_INSTALLED_LIBRARIES = [ 18 ("espnet", None), 19 ("kaldiio", None), 20 ("matplotlib", None), 21 ("chainer", ("6.0.0")), 22 ("chainer_ctc", None), 23 ("warprnnt_pytorch", ("0.1")), 24 ] 25 26 # NOTE: list all torch versions which are compatible with espnet 27 COMPATIBLE_TORCH_VERSIONS = ( 28 "0.4.1", 29 "1.0.0", 30 "1.0.1", 31 "1.0.1.post2", 32 "1.1.0", 33 "1.2.0", 34 "1.3.0", 35 "1.3.1", 36 "1.4.0", 37 "1.5.0", 38 "1.5.1", 39 ) 40 41 42 def main(args): 43 """Check the installation.""" 44 parser = argparse.ArgumentParser() 45 parser.add_argument( 46 "--no-cuda", 47 action="store_true", 48 default=False, 49 help="Disable cuda-related tests", 50 ) 51 args = parser.parse_args(args) 52 53 logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") 54 logging.info(f"python version = {sys.version}") 55 56 library_list = [] 57 58 if not args.no_cuda: 59 library_list.append(("cupy", ("6.0.0"))) 60 61 # check torch installation at first 62 try: 63 import torch 64 65 logging.info(f"pytorch version = {torch.__version__}") 66 if torch.__version__ not in COMPATIBLE_TORCH_VERSIONS: 67 logging.warning(f"{torch.__version__} is not tested. please be careful.") 68 except ImportError: 69 logging.warning("torch is not installed.") 70 logging.warning("please try to setup again and then re-run this script.") 71 sys.exit(1) 72 73 # warpctc can be installed only for pytorch < 1.2 74 if LooseVersion(torch.__version__) < LooseVersion("1.2.0"): 75 library_list.append(("warpctc_pytorch", ("0.1.1", "0.1.3"))) 76 77 library_list.extend(MANUALLY_INSTALLED_LIBRARIES) 78 79 # check library availableness 80 logging.info("library availableness check start.") 81 logging.info("# libraries to be checked = %d" % len(library_list)) 82 is_correct_installed_list = [] 83 for idx, (name, version) in enumerate(library_list): 84 try: 85 importlib.import_module(name) 86 logging.info("--> %s is installed." % name) 87 is_correct_installed_list.append(True) 88 except ImportError: 89 logging.warning("--> %s is not installed." % name) 90 is_correct_installed_list.append(False) 91 logging.info("library availableness check done.") 92 logging.info( 93 "%d / %d libraries are correctly installed." 94 % (sum(is_correct_installed_list), len(library_list)) 95 ) 96 97 if len(library_list) != sum(is_correct_installed_list): 98 logging.warning("please try to setup again and then re-run this script.") 99 sys.exit(1) 100 101 # check library version 102 num_version_specified = sum( 103 [True if v is not None else False for n, v in library_list] 104 ) 105 logging.info("library version check start.") 106 logging.info("# libraries to be checked = %d" % num_version_specified) 107 is_correct_version_list = [] 108 for idx, (name, version) in enumerate(library_list): 109 if version is not None: 110 # Note: temp. fix for warprnnt_pytorch 111 # not found version with importlib 112 if name == "warprnnt_pytorch": 113 import pkg_resources 114 115 vers = pkg_resources.get_distribution(name).version 116 else: 117 vers = importlib.import_module(name).__version__ 118 if vers is not None: 119 is_correct = vers in version 120 if is_correct: 121 logging.info("--> %s version is matched (%s)." % (name, vers)) 122 is_correct_version_list.append(True) 123 else: 124 logging.warning( 125 "--> %s version is incorrect (%s is not in %s)." 126 % (name, vers, str(version)) 127 ) 128 is_correct_version_list.append(False) 129 else: 130 logging.info( 131 "--> %s has no version info, but version is specified." % name 132 ) 133 logging.info("--> maybe it is better to reinstall the latest version.") 134 is_correct_version_list.append(False) 135 logging.info("library version check done.") 136 logging.info( 137 "%d / %d libraries are correct version." 138 % (sum(is_correct_version_list), num_version_specified) 139 ) 140 141 if sum(is_correct_version_list) != num_version_specified: 142 logging.info("please try to setup again and then re-run this script.") 143 sys.exit(1) 144 145 # check cuda availableness 146 if args.no_cuda: 147 logging.info("cuda availableness check skipped.") 148 else: 149 logging.info("cuda availableness check start.") 150 import chainer 151 import torch 152 153 try: 154 assert torch.cuda.is_available() 155 logging.info("--> cuda is available in torch.") 156 except AssertionError: 157 logging.warning("--> it seems that cuda is not available in torch.") 158 try: 159 assert torch.backends.cudnn.is_available() 160 logging.info("--> cudnn is available in torch.") 161 except AssertionError: 162 logging.warning("--> it seems that cudnn is not available in torch.") 163 try: 164 assert chainer.backends.cuda.available 165 logging.info("--> cuda is available in chainer.") 166 except AssertionError: 167 logging.warning("--> it seems that cuda is not available in chainer.") 168 try: 169 assert chainer.backends.cuda.cudnn_enabled 170 logging.info("--> cudnn is available in chainer.") 171 except AssertionError: 172 logging.warning("--> it seems that cudnn is not available in chainer.") 173 try: 174 from cupy.cuda import nccl # NOQA 175 176 logging.info("--> nccl is installed.") 177 except ImportError: 178 logging.warning( 179 "--> it seems that nccl is not installed. multi-gpu is not enabled." 180 ) 181 logging.warning( 182 "--> if you want to use multi-gpu, please install it and then re-setup." 183 ) 184 try: 185 assert torch.cuda.device_count() > 1 186 logging.info( 187 f"--> multi-gpu is available (#gpus={torch.cuda.device_count()})." 188 ) 189 except AssertionError: 190 logging.warning("--> it seems that only single gpu is available.") 191 logging.warning("--> maybe your machine has only one gpu.") 192 logging.info("cuda availableness check done.") 193 194 logging.info("installation check is done.") 195 196 197 if __name__ == "__main__": 198 main(sys.argv[1:]) 199 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tools/check_install.py b/tools/check_install.py --- a/tools/check_install.py +++ b/tools/check_install.py @@ -48,14 +48,22 @@ default=False, help="Disable cuda-related tests", ) + parser.add_argument( + "--no-cupy", + action="store_true", + default=False, + help="Disable cupy test", + ) args = parser.parse_args(args) logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") logging.info(f"python version = {sys.version}") library_list = [] + if args.no_cuda: + args.no_cupy = True - if not args.no_cuda: + if not args.no_cupy: library_list.append(("cupy", ("6.0.0"))) # check torch installation at first
{"golden_diff": "diff --git a/tools/check_install.py b/tools/check_install.py\n--- a/tools/check_install.py\n+++ b/tools/check_install.py\n@@ -48,14 +48,22 @@\n default=False,\n help=\"Disable cuda-related tests\",\n )\n+ parser.add_argument(\n+ \"--no-cupy\",\n+ action=\"store_true\",\n+ default=False,\n+ help=\"Disable cupy test\",\n+ )\n args = parser.parse_args(args)\n \n logging.basicConfig(level=logging.INFO, format=\"%(levelname)s: %(message)s\")\n logging.info(f\"python version = {sys.version}\")\n \n library_list = []\n+ if args.no_cuda:\n+ args.no_cupy = True\n \n- if not args.no_cuda:\n+ if not args.no_cupy:\n library_list.append((\"cupy\", (\"6.0.0\")))\n \n # check torch installation at first\n", "issue": "Could not find a version that satisfies the requirement cupy-cuda102==6.0.0 \n```\r\nERROR: Could not find a version that satisfies the requirement cupy-cuda102==6.0.0 (from versions: 7.2.0, 7.3.0, 7.4.0, 7.5.0, 7.6.0, 8.0.0a1, 8.0.0b1, 8.0.0b2, 8.0.0b3, 8.0.0b4)\r\n``` \r\ncupy-cuda102 doesn't have version 6.0.0 , upgrade ```CHAINER_VERSION := 6.0.0``` to ```CHAINER_VERSION := 7.4.0```, please\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"Script to check whether the installation is done correctly.\"\"\"\n\n# Copyright 2018 Nagoya University (Tomoki Hayashi)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\nimport argparse\nimport importlib\nimport logging\nimport sys\n\nfrom distutils.version import LooseVersion\n\n\n# NOTE: add the libraries which are not included in setup.py\nMANUALLY_INSTALLED_LIBRARIES = [\n (\"espnet\", None),\n (\"kaldiio\", None),\n (\"matplotlib\", None),\n (\"chainer\", (\"6.0.0\")),\n (\"chainer_ctc\", None),\n (\"warprnnt_pytorch\", (\"0.1\")),\n]\n\n# NOTE: list all torch versions which are compatible with espnet\nCOMPATIBLE_TORCH_VERSIONS = (\n \"0.4.1\",\n \"1.0.0\",\n \"1.0.1\",\n \"1.0.1.post2\",\n \"1.1.0\",\n \"1.2.0\",\n \"1.3.0\",\n \"1.3.1\",\n \"1.4.0\",\n \"1.5.0\",\n \"1.5.1\",\n)\n\n\ndef main(args):\n \"\"\"Check the installation.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--no-cuda\",\n action=\"store_true\",\n default=False,\n help=\"Disable cuda-related tests\",\n )\n args = parser.parse_args(args)\n\n logging.basicConfig(level=logging.INFO, format=\"%(levelname)s: %(message)s\")\n logging.info(f\"python version = {sys.version}\")\n\n library_list = []\n\n if not args.no_cuda:\n library_list.append((\"cupy\", (\"6.0.0\")))\n\n # check torch installation at first\n try:\n import torch\n\n logging.info(f\"pytorch version = {torch.__version__}\")\n if torch.__version__ not in COMPATIBLE_TORCH_VERSIONS:\n logging.warning(f\"{torch.__version__} is not tested. please be careful.\")\n except ImportError:\n logging.warning(\"torch is not installed.\")\n logging.warning(\"please try to setup again and then re-run this script.\")\n sys.exit(1)\n\n # warpctc can be installed only for pytorch < 1.2\n if LooseVersion(torch.__version__) < LooseVersion(\"1.2.0\"):\n library_list.append((\"warpctc_pytorch\", (\"0.1.1\", \"0.1.3\")))\n\n library_list.extend(MANUALLY_INSTALLED_LIBRARIES)\n\n # check library availableness\n logging.info(\"library availableness check start.\")\n logging.info(\"# libraries to be checked = %d\" % len(library_list))\n is_correct_installed_list = []\n for idx, (name, version) in enumerate(library_list):\n try:\n importlib.import_module(name)\n logging.info(\"--> %s is installed.\" % name)\n is_correct_installed_list.append(True)\n except ImportError:\n logging.warning(\"--> %s is not installed.\" % name)\n is_correct_installed_list.append(False)\n logging.info(\"library availableness check done.\")\n logging.info(\n \"%d / %d libraries are correctly installed.\"\n % (sum(is_correct_installed_list), len(library_list))\n )\n\n if len(library_list) != sum(is_correct_installed_list):\n logging.warning(\"please try to setup again and then re-run this script.\")\n sys.exit(1)\n\n # check library version\n num_version_specified = sum(\n [True if v is not None else False for n, v in library_list]\n )\n logging.info(\"library version check start.\")\n logging.info(\"# libraries to be checked = %d\" % num_version_specified)\n is_correct_version_list = []\n for idx, (name, version) in enumerate(library_list):\n if version is not None:\n # Note: temp. fix for warprnnt_pytorch\n # not found version with importlib\n if name == \"warprnnt_pytorch\":\n import pkg_resources\n\n vers = pkg_resources.get_distribution(name).version\n else:\n vers = importlib.import_module(name).__version__\n if vers is not None:\n is_correct = vers in version\n if is_correct:\n logging.info(\"--> %s version is matched (%s).\" % (name, vers))\n is_correct_version_list.append(True)\n else:\n logging.warning(\n \"--> %s version is incorrect (%s is not in %s).\"\n % (name, vers, str(version))\n )\n is_correct_version_list.append(False)\n else:\n logging.info(\n \"--> %s has no version info, but version is specified.\" % name\n )\n logging.info(\"--> maybe it is better to reinstall the latest version.\")\n is_correct_version_list.append(False)\n logging.info(\"library version check done.\")\n logging.info(\n \"%d / %d libraries are correct version.\"\n % (sum(is_correct_version_list), num_version_specified)\n )\n\n if sum(is_correct_version_list) != num_version_specified:\n logging.info(\"please try to setup again and then re-run this script.\")\n sys.exit(1)\n\n # check cuda availableness\n if args.no_cuda:\n logging.info(\"cuda availableness check skipped.\")\n else:\n logging.info(\"cuda availableness check start.\")\n import chainer\n import torch\n\n try:\n assert torch.cuda.is_available()\n logging.info(\"--> cuda is available in torch.\")\n except AssertionError:\n logging.warning(\"--> it seems that cuda is not available in torch.\")\n try:\n assert torch.backends.cudnn.is_available()\n logging.info(\"--> cudnn is available in torch.\")\n except AssertionError:\n logging.warning(\"--> it seems that cudnn is not available in torch.\")\n try:\n assert chainer.backends.cuda.available\n logging.info(\"--> cuda is available in chainer.\")\n except AssertionError:\n logging.warning(\"--> it seems that cuda is not available in chainer.\")\n try:\n assert chainer.backends.cuda.cudnn_enabled\n logging.info(\"--> cudnn is available in chainer.\")\n except AssertionError:\n logging.warning(\"--> it seems that cudnn is not available in chainer.\")\n try:\n from cupy.cuda import nccl # NOQA\n\n logging.info(\"--> nccl is installed.\")\n except ImportError:\n logging.warning(\n \"--> it seems that nccl is not installed. multi-gpu is not enabled.\"\n )\n logging.warning(\n \"--> if you want to use multi-gpu, please install it and then re-setup.\"\n )\n try:\n assert torch.cuda.device_count() > 1\n logging.info(\n f\"--> multi-gpu is available (#gpus={torch.cuda.device_count()}).\"\n )\n except AssertionError:\n logging.warning(\"--> it seems that only single gpu is available.\")\n logging.warning(\"--> maybe your machine has only one gpu.\")\n logging.info(\"cuda availableness check done.\")\n\n logging.info(\"installation check is done.\")\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n", "path": "tools/check_install.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"Script to check whether the installation is done correctly.\"\"\"\n\n# Copyright 2018 Nagoya University (Tomoki Hayashi)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\nimport argparse\nimport importlib\nimport logging\nimport sys\n\nfrom distutils.version import LooseVersion\n\n\n# NOTE: add the libraries which are not included in setup.py\nMANUALLY_INSTALLED_LIBRARIES = [\n (\"espnet\", None),\n (\"kaldiio\", None),\n (\"matplotlib\", None),\n (\"chainer\", (\"6.0.0\")),\n (\"chainer_ctc\", None),\n (\"warprnnt_pytorch\", (\"0.1\")),\n]\n\n# NOTE: list all torch versions which are compatible with espnet\nCOMPATIBLE_TORCH_VERSIONS = (\n \"0.4.1\",\n \"1.0.0\",\n \"1.0.1\",\n \"1.0.1.post2\",\n \"1.1.0\",\n \"1.2.0\",\n \"1.3.0\",\n \"1.3.1\",\n \"1.4.0\",\n \"1.5.0\",\n \"1.5.1\",\n)\n\n\ndef main(args):\n \"\"\"Check the installation.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--no-cuda\",\n action=\"store_true\",\n default=False,\n help=\"Disable cuda-related tests\",\n )\n parser.add_argument(\n \"--no-cupy\",\n action=\"store_true\",\n default=False,\n help=\"Disable cupy test\",\n )\n args = parser.parse_args(args)\n\n logging.basicConfig(level=logging.INFO, format=\"%(levelname)s: %(message)s\")\n logging.info(f\"python version = {sys.version}\")\n\n library_list = []\n if args.no_cuda:\n args.no_cupy = True\n\n if not args.no_cupy:\n library_list.append((\"cupy\", (\"6.0.0\")))\n\n # check torch installation at first\n try:\n import torch\n\n logging.info(f\"pytorch version = {torch.__version__}\")\n if torch.__version__ not in COMPATIBLE_TORCH_VERSIONS:\n logging.warning(f\"{torch.__version__} is not tested. please be careful.\")\n except ImportError:\n logging.warning(\"torch is not installed.\")\n logging.warning(\"please try to setup again and then re-run this script.\")\n sys.exit(1)\n\n # warpctc can be installed only for pytorch < 1.2\n if LooseVersion(torch.__version__) < LooseVersion(\"1.2.0\"):\n library_list.append((\"warpctc_pytorch\", (\"0.1.1\", \"0.1.3\")))\n\n library_list.extend(MANUALLY_INSTALLED_LIBRARIES)\n\n # check library availableness\n logging.info(\"library availableness check start.\")\n logging.info(\"# libraries to be checked = %d\" % len(library_list))\n is_correct_installed_list = []\n for idx, (name, version) in enumerate(library_list):\n try:\n importlib.import_module(name)\n logging.info(\"--> %s is installed.\" % name)\n is_correct_installed_list.append(True)\n except ImportError:\n logging.warning(\"--> %s is not installed.\" % name)\n is_correct_installed_list.append(False)\n logging.info(\"library availableness check done.\")\n logging.info(\n \"%d / %d libraries are correctly installed.\"\n % (sum(is_correct_installed_list), len(library_list))\n )\n\n if len(library_list) != sum(is_correct_installed_list):\n logging.warning(\"please try to setup again and then re-run this script.\")\n sys.exit(1)\n\n # check library version\n num_version_specified = sum(\n [True if v is not None else False for n, v in library_list]\n )\n logging.info(\"library version check start.\")\n logging.info(\"# libraries to be checked = %d\" % num_version_specified)\n is_correct_version_list = []\n for idx, (name, version) in enumerate(library_list):\n if version is not None:\n # Note: temp. fix for warprnnt_pytorch\n # not found version with importlib\n if name == \"warprnnt_pytorch\":\n import pkg_resources\n\n vers = pkg_resources.get_distribution(name).version\n else:\n vers = importlib.import_module(name).__version__\n if vers is not None:\n is_correct = vers in version\n if is_correct:\n logging.info(\"--> %s version is matched (%s).\" % (name, vers))\n is_correct_version_list.append(True)\n else:\n logging.warning(\n \"--> %s version is incorrect (%s is not in %s).\"\n % (name, vers, str(version))\n )\n is_correct_version_list.append(False)\n else:\n logging.info(\n \"--> %s has no version info, but version is specified.\" % name\n )\n logging.info(\"--> maybe it is better to reinstall the latest version.\")\n is_correct_version_list.append(False)\n logging.info(\"library version check done.\")\n logging.info(\n \"%d / %d libraries are correct version.\"\n % (sum(is_correct_version_list), num_version_specified)\n )\n\n if sum(is_correct_version_list) != num_version_specified:\n logging.info(\"please try to setup again and then re-run this script.\")\n sys.exit(1)\n\n # check cuda availableness\n if args.no_cuda:\n logging.info(\"cuda availableness check skipped.\")\n else:\n logging.info(\"cuda availableness check start.\")\n import chainer\n import torch\n\n try:\n assert torch.cuda.is_available()\n logging.info(\"--> cuda is available in torch.\")\n except AssertionError:\n logging.warning(\"--> it seems that cuda is not available in torch.\")\n try:\n assert torch.backends.cudnn.is_available()\n logging.info(\"--> cudnn is available in torch.\")\n except AssertionError:\n logging.warning(\"--> it seems that cudnn is not available in torch.\")\n try:\n assert chainer.backends.cuda.available\n logging.info(\"--> cuda is available in chainer.\")\n except AssertionError:\n logging.warning(\"--> it seems that cuda is not available in chainer.\")\n try:\n assert chainer.backends.cuda.cudnn_enabled\n logging.info(\"--> cudnn is available in chainer.\")\n except AssertionError:\n logging.warning(\"--> it seems that cudnn is not available in chainer.\")\n try:\n from cupy.cuda import nccl # NOQA\n\n logging.info(\"--> nccl is installed.\")\n except ImportError:\n logging.warning(\n \"--> it seems that nccl is not installed. multi-gpu is not enabled.\"\n )\n logging.warning(\n \"--> if you want to use multi-gpu, please install it and then re-setup.\"\n )\n try:\n assert torch.cuda.device_count() > 1\n logging.info(\n f\"--> multi-gpu is available (#gpus={torch.cuda.device_count()}).\"\n )\n except AssertionError:\n logging.warning(\"--> it seems that only single gpu is available.\")\n logging.warning(\"--> maybe your machine has only one gpu.\")\n logging.info(\"cuda availableness check done.\")\n\n logging.info(\"installation check is done.\")\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n", "path": "tools/check_install.py"}]}
2,489
194
gh_patches_debug_20006
rasdani/github-patches
git_diff
googleapis__google-api-python-client-1104
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Create and populate folder for static discovery documents `googleapiclient` downloads the discovery docs for a Google API service at runtime and then constructs a client. Issue #1063 captures the need to move to a static package where the discovery docs are not fetched at run time. An incremental step towards satisfying #1063 would be to create a folder with static Google API discovery documents in this repo. The [discoveries folder](https://github.com/googleapis/discovery-artifact-manager/tree/master/discoveries) in the googleapis/discovery-artifact-manager repo has an up to date copy of the discovery documents. One approach would be to modify [synth.py](https://github.com/googleapis/google-api-python-client/blob/master/synth.py) to copy the discovery documents from discovery-artifact-manager similar to how it is done [here](https://github.com/googleapis/python-api-common-protos/blob/master/synth.py#L38). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `synth.py` Content: ``` 1 # Copyright 2020 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import synthtool as s 16 from synthtool import gcp 17 18 common = gcp.CommonTemplates() 19 20 # ---------------------------------------------------------------------------- 21 # Add templated files 22 # ---------------------------------------------------------------------------- 23 templated_files = common.py_library() 24 25 # Copy kokoro configs. 26 # Docs are excluded as repo docs cannot currently be generated using sphinx. 27 s.move(templated_files / '.kokoro', excludes=['**/docs/*', 'publish-docs.sh']) 28 29 # Also move issue templates 30 s.move(templated_files / '.github') 31 32 s.shell.run(["nox", "-s", "docs"], hide_output=False) ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/synth.py b/synth.py --- a/synth.py +++ b/synth.py @@ -14,6 +14,9 @@ import synthtool as s from synthtool import gcp +from synthtool.sources import git + +DISCOVERY_ARTIFACT_MANAGER_REPO = "googleapis/discovery-artifact-manager" common = gcp.CommonTemplates() @@ -29,4 +32,20 @@ # Also move issue templates s.move(templated_files / '.github') -s.shell.run(["nox", "-s", "docs"], hide_output=False) \ No newline at end of file +# ---------------------------------------------------------------------------- +# Copy discoveries folder from discovery-artifact-manager repo +# ---------------------------------------------------------------------------- + +discovery_artifact_manager_url = git.make_repo_clone_url(DISCOVERY_ARTIFACT_MANAGER_REPO) +discovery_artifacts = git.clone(discovery_artifact_manager_url) / "discoveries" + +excludes = [ + "**/BUILD.bazel", +] +s.copy(discovery_artifacts, + "googleapiclient/discovery_cache/documents", excludes=excludes) + +# ---------------------------------------------------------------------------- +# Generate docs +# ---------------------------------------------------------------------------- +s.shell.run(["nox", "-s", "docs"], hide_output=False)
{"golden_diff": "diff --git a/synth.py b/synth.py\n--- a/synth.py\n+++ b/synth.py\n@@ -14,6 +14,9 @@\n \n import synthtool as s\n from synthtool import gcp\n+from synthtool.sources import git\n+\n+DISCOVERY_ARTIFACT_MANAGER_REPO = \"googleapis/discovery-artifact-manager\"\n \n common = gcp.CommonTemplates()\n \n@@ -29,4 +32,20 @@\n # Also move issue templates\n s.move(templated_files / '.github')\n \n-s.shell.run([\"nox\", \"-s\", \"docs\"], hide_output=False)\n\\ No newline at end of file\n+# ----------------------------------------------------------------------------\n+# Copy discoveries folder from discovery-artifact-manager repo\n+# ----------------------------------------------------------------------------\n+\n+discovery_artifact_manager_url = git.make_repo_clone_url(DISCOVERY_ARTIFACT_MANAGER_REPO)\n+discovery_artifacts = git.clone(discovery_artifact_manager_url) / \"discoveries\"\n+\n+excludes = [\n+ \"**/BUILD.bazel\",\n+]\n+s.copy(discovery_artifacts,\n+ \"googleapiclient/discovery_cache/documents\", excludes=excludes)\n+\n+# ----------------------------------------------------------------------------\n+# Generate docs\n+# ----------------------------------------------------------------------------\n+s.shell.run([\"nox\", \"-s\", \"docs\"], hide_output=False)\n", "issue": "Create and populate folder for static discovery documents \n`googleapiclient` downloads the discovery docs for a Google API service at runtime and then constructs a client. Issue #1063 captures the need to move to a static package where the discovery docs are not fetched at run time. An incremental step towards satisfying #1063 would be to create a folder with static Google API discovery documents in this repo. The [discoveries folder](https://github.com/googleapis/discovery-artifact-manager/tree/master/discoveries) in the googleapis/discovery-artifact-manager repo has an up to date copy of the discovery documents. One approach would be to modify [synth.py](https://github.com/googleapis/google-api-python-client/blob/master/synth.py) to copy the discovery documents from discovery-artifact-manager similar to how it is done [here](https://github.com/googleapis/python-api-common-protos/blob/master/synth.py#L38).\n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport synthtool as s\nfrom synthtool import gcp\n\ncommon = gcp.CommonTemplates()\n\n# ----------------------------------------------------------------------------\n# Add templated files\n# ----------------------------------------------------------------------------\ntemplated_files = common.py_library()\n\n# Copy kokoro configs.\n# Docs are excluded as repo docs cannot currently be generated using sphinx.\ns.move(templated_files / '.kokoro', excludes=['**/docs/*', 'publish-docs.sh'])\n\n# Also move issue templates\ns.move(templated_files / '.github')\n\ns.shell.run([\"nox\", \"-s\", \"docs\"], hide_output=False)", "path": "synth.py"}], "after_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport synthtool as s\nfrom synthtool import gcp\nfrom synthtool.sources import git\n\nDISCOVERY_ARTIFACT_MANAGER_REPO = \"googleapis/discovery-artifact-manager\"\n\ncommon = gcp.CommonTemplates()\n\n# ----------------------------------------------------------------------------\n# Add templated files\n# ----------------------------------------------------------------------------\ntemplated_files = common.py_library()\n\n# Copy kokoro configs.\n# Docs are excluded as repo docs cannot currently be generated using sphinx.\ns.move(templated_files / '.kokoro', excludes=['**/docs/*', 'publish-docs.sh'])\n\n# Also move issue templates\ns.move(templated_files / '.github')\n\n# ----------------------------------------------------------------------------\n# Copy discoveries folder from discovery-artifact-manager repo\n# ----------------------------------------------------------------------------\n\ndiscovery_artifact_manager_url = git.make_repo_clone_url(DISCOVERY_ARTIFACT_MANAGER_REPO)\ndiscovery_artifacts = git.clone(discovery_artifact_manager_url) / \"discoveries\"\n\nexcludes = [\n \"**/BUILD.bazel\",\n]\ns.copy(discovery_artifacts,\n \"googleapiclient/discovery_cache/documents\", excludes=excludes)\n\n# ----------------------------------------------------------------------------\n# Generate docs\n# ----------------------------------------------------------------------------\ns.shell.run([\"nox\", \"-s\", \"docs\"], hide_output=False)\n", "path": "synth.py"}]}
758
262
gh_patches_debug_37059
rasdani/github-patches
git_diff
mantl__mantl-377
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Use Consul DNS instead of .novalocal Templatizing the hosts file to include `.novalocal` worked when we were only on OpenStack, but now that we have multiple providers we need a better way to do service discovery. Fortunately, we have Consul DNS, which will probably work. - [ ] change Mesos to use DNS name - [ ] change Marathon to use DNS name --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `plugins/inventory/terraform.py` Content: ``` 1 #!/usr/bin/env python 2 """\ 3 Dynamic inventory for Terraform - finds all `.tfstate` files below the working 4 directory and generates an inventory based on them. 5 """ 6 from __future__ import unicode_literals, print_function 7 import argparse 8 from collections import defaultdict 9 from functools import wraps 10 import json 11 import os 12 import re 13 14 15 def tfstates(root=None): 16 root = root or os.getcwd() 17 for dirpath, _, filenames in os.walk(root): 18 for name in filenames: 19 if os.path.splitext(name)[-1] == '.tfstate': 20 yield os.path.join(dirpath, name) 21 22 23 def iterresources(filenames): 24 for filename in filenames: 25 with open(filename, 'r') as json_file: 26 state = json.load(json_file) 27 for module in state['modules']: 28 for key, resource in module['resources'].items(): 29 yield key, resource 30 31 ## READ RESOURCES 32 PARSERS = {} 33 34 35 def _clean_dc(dcname): 36 # Consul DCs are strictly alphanumeric with underscores and hyphens - 37 # ensure that the consul_dc attribute meets these requirements. 38 return re.sub('[^\w_\-]', '-', dcname) 39 40 41 def iterhosts(resources): 42 '''yield host tuples of (name, attributes, groups)''' 43 for key, resource in resources: 44 resource_type, name = key.split('.', 1) 45 try: 46 parser = PARSERS[resource_type] 47 except KeyError: 48 continue 49 50 yield parser(resource) 51 52 53 def parses(prefix): 54 def inner(func): 55 PARSERS[prefix] = func 56 return func 57 58 return inner 59 60 61 def calculate_mi_vars(func): 62 """calculate microservices-infrastructure vars""" 63 64 @wraps(func) 65 def inner(*args, **kwargs): 66 name, attrs, groups = func(*args, **kwargs) 67 68 # attrs 69 if attrs['role'] == 'control': 70 attrs['consul_is_server'] = True 71 elif attrs['role'] == 'worker': 72 attrs['consul_is_server'] = False 73 74 # groups 75 if attrs.get('publicly_routable', False): 76 groups.append('publicly_routable') 77 78 return name, attrs, groups 79 80 return inner 81 82 83 def _parse_prefix(source, prefix, sep='.'): 84 for compkey, value in source.items(): 85 try: 86 curprefix, rest = compkey.split(sep, 1) 87 except ValueError: 88 continue 89 90 if curprefix != prefix or rest == '#': 91 continue 92 93 yield rest, value 94 95 96 def parse_attr_list(source, prefix, sep='.'): 97 size_key = '%s%s#' % (prefix, sep) 98 try: 99 size = int(source[size_key]) 100 except KeyError: 101 return [] 102 103 attrs = [{} for _ in range(size)] 104 for compkey, value in _parse_prefix(source, prefix, sep): 105 nth, key = compkey.split(sep, 1) 106 attrs[int(nth)][key] = value 107 108 return attrs 109 110 111 def parse_dict(source, prefix, sep='.'): 112 return dict(_parse_prefix(source, prefix, sep)) 113 114 115 def parse_list(source, prefix, sep='.'): 116 return [value for _, value in _parse_prefix(source, prefix, sep)] 117 118 119 @parses('openstack_compute_instance_v2') 120 @calculate_mi_vars 121 def openstack_host(resource, tfvars=None): 122 raw_attrs = resource['primary']['attributes'] 123 name = raw_attrs['name'] 124 groups = [] 125 126 attrs = { 127 'access_ip_v4': raw_attrs['access_ip_v4'], 128 'access_ip_v6': raw_attrs['access_ip_v6'], 129 'flavor': parse_dict(raw_attrs, 'flavor', 130 sep='_'), 131 'id': raw_attrs['id'], 132 'image': parse_dict(raw_attrs, 'image', 133 sep='_'), 134 'key_pair': raw_attrs['key_pair'], 135 'metadata': parse_dict(raw_attrs, 'metadata'), 136 'network': parse_attr_list(raw_attrs, 'network'), 137 'region': raw_attrs['region'], 138 'security_groups': parse_list(raw_attrs, 'security_groups'), 139 #ansible 140 'ansible_ssh_port': 22, 141 'ansible_ssh_user': 'centos', 142 } 143 144 try: 145 attrs.update({ 146 'ansible_ssh_host': raw_attrs['access_ip_v4'], 147 'publicly_routable': True, 148 }) 149 except (KeyError, ValueError): 150 attrs.update({'ansible_ssh_host': '', 'publicly_routable': False, }) 151 152 # attrs specific to microservices-infrastructure 153 attrs.update({ 154 'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['region'])), 155 'role': attrs['metadata'].get('role', 'none') 156 }) 157 158 # add groups based on attrs 159 groups.append('os_image=' + attrs['image']['name']) 160 groups.append('os_flavor=' + attrs['flavor']['name']) 161 groups.extend('os_metadata_%s=%s' % item 162 for item in attrs['metadata'].items()) 163 groups.append('os_region=' + attrs['region']) 164 165 # groups specific to microservices-infrastructure 166 groups.append('role=' + attrs['metadata'].get('role', 'none')) 167 groups.append('dc=' + attrs['consul_dc']) 168 169 return name, attrs, groups 170 171 172 @parses('google_compute_instance') 173 @calculate_mi_vars 174 def gce_host(resource, tfvars=None): 175 name = resource['primary']['id'] 176 raw_attrs = resource['primary']['attributes'] 177 groups = [] 178 179 # network interfaces 180 interfaces = parse_attr_list(raw_attrs, 'network_interface') 181 for interface in interfaces: 182 interface['access_config'] = parse_attr_list(interface, 183 'access_config') 184 for key in interface.keys(): 185 if '.' in key: 186 del interface[key] 187 188 # general attrs 189 attrs = { 190 'can_ip_forward': raw_attrs['can_ip_forward'] == 'true', 191 'disks': parse_attr_list(raw_attrs, 'disk'), 192 'machine_type': raw_attrs['machine_type'], 193 'metadata': parse_dict(raw_attrs, 'metadata'), 194 'network': parse_attr_list(raw_attrs, 'network'), 195 'network_interface': interfaces, 196 'self_link': raw_attrs['self_link'], 197 'service_account': parse_attr_list(raw_attrs, 'service_account'), 198 'tags': parse_list(raw_attrs, 'tags'), 199 'zone': raw_attrs['zone'], 200 # ansible 201 'ansible_ssh_port': 22, 202 'ansible_ssh_user': 'deploy', 203 } 204 205 # attrs specific to microservices-infrastructure 206 attrs.update({ 207 'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['zone'])), 208 'role': attrs['metadata'].get('role', 'none') 209 }) 210 211 try: 212 attrs.update({ 213 'ansible_ssh_host': interfaces[0]['access_config'][0]['nat_ip'], 214 'publicly_routable': True, 215 }) 216 except (KeyError, ValueError): 217 attrs.update({'ansible_ssh_host': '', 'publicly_routable': False}) 218 219 # add groups based on attrs 220 groups.extend('gce_image=' + disk['image'] for disk in attrs['disks']) 221 groups.append('gce_machine_type=' + attrs['machine_type']) 222 groups.extend('gce_metadata_%s=%s' % (key, value) 223 for (key, value) in attrs['metadata'].items() 224 if key not in set(['sshKeys'])) 225 groups.extend('gce_tag=' + tag for tag in attrs['tags']) 226 groups.append('gce_zone=' + attrs['zone']) 227 228 if attrs['can_ip_forward']: 229 groups.append('gce_ip_forward') 230 if attrs['publicly_routable']: 231 groups.append('gce_publicly_routable') 232 233 # groups specific to microservices-infrastructure 234 groups.append('role=' + attrs['metadata'].get('role', 'none')) 235 groups.append('dc=' + attrs['consul_dc']) 236 237 return name, attrs, groups 238 239 240 ## QUERY TYPES 241 def query_host(hosts, target): 242 for name, attrs, _ in hosts: 243 if name == target: 244 return attrs 245 246 return {} 247 248 249 def query_list(hosts): 250 groups = defaultdict(dict) 251 meta = {} 252 253 for name, attrs, hostgroups in hosts: 254 for group in set(hostgroups): 255 groups[group].setdefault('hosts', []) 256 groups[group]['hosts'].append(name) 257 258 meta[name] = attrs 259 260 groups['_meta'] = {'hostvars': meta} 261 return groups 262 263 264 def main(): 265 266 parser = argparse.ArgumentParser(__file__, __doc__) 267 modes = parser.add_mutually_exclusive_group(required=True) 268 modes.add_argument('--list', 269 action='store_true', 270 help='list all variables') 271 modes.add_argument('--host', help='list variables for a single host') 272 parser.add_argument('--pretty', 273 action='store_true', 274 help='pretty-print output JSON') 275 parser.add_argument('--nometa', 276 action='store_true', 277 help='with --list, exclude hostvars') 278 279 args = parser.parse_args() 280 281 hosts = iterhosts(iterresources(tfstates())) 282 if args.list: 283 output = query_list(hosts) 284 if args.nometa: 285 del output['_meta'] 286 else: 287 output = query_host(hosts, args.host) 288 289 print(json.dumps(output, indent=4 if args.pretty else None)) 290 parser.exit() 291 292 293 if __name__ == '__main__': 294 main() 295 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/plugins/inventory/terraform.py b/plugins/inventory/terraform.py --- a/plugins/inventory/terraform.py +++ b/plugins/inventory/terraform.py @@ -116,6 +116,17 @@ return [value for _, value in _parse_prefix(source, prefix, sep)] +def parse_bool(string_form): + token = string_form.lower()[0] + + if token == 't': + return True + elif token == 'f': + return False + else: + raise ValueError('could not convert %r to a bool' % string_form) + + @parses('openstack_compute_instance_v2') @calculate_mi_vars def openstack_host(resource, tfvars=None): @@ -169,6 +180,65 @@ return name, attrs, groups +@parses('aws_instance') +@calculate_mi_vars +def aws_host(resource, tfvars=None): + name = resource['primary']['attributes']['tags.Name'] + raw_attrs = resource['primary']['attributes'] + + groups = [] + + attrs = { + 'ami': raw_attrs['ami'], + 'availability_zone': raw_attrs['availability_zone'], + 'ebs_block_device': parse_attr_list(raw_attrs, 'ebs_block_device'), + 'ebs_optimized': parse_bool(raw_attrs['ebs_optimized']), + 'ephemeral_block_device': parse_attr_list(raw_attrs, + 'ephemeral_block_device'), + 'id': raw_attrs['id'], + 'key_name': raw_attrs['key_name'], + 'private': parse_dict(raw_attrs, 'private', + sep='_'), + 'public': parse_dict(raw_attrs, 'public', + sep='_'), + 'root_block_device': parse_attr_list(raw_attrs, 'root_block_device'), + 'security_groups': parse_attr_list(raw_attrs, 'security_groups'), + 'subnet': parse_dict(raw_attrs, 'subnet', + sep='_'), + 'tags': parse_dict(raw_attrs, 'tags'), + 'tenancy': raw_attrs['tenancy'], + 'vpc_security_group_ids': parse_list(raw_attrs, + 'vpc_security_group_ids'), + # ansible-specific + 'ansible_ssh_port': 22, + 'ansible_ssh_user': raw_attrs['tags.sshUser'], + 'ansible_ssh_host': raw_attrs['public_ip'], + } + + # attrs specific to microservices-infrastructure + attrs.update({ + 'consul_dc': attrs['tags'].get('dc'), + 'role': attrs['tags'].get('role', 'none') + }) + + # groups specific to microservices-infrastructure + groups.extend(['aws_ami=' + attrs['ami'], + 'aws_az=' + attrs['availability_zone'], + 'aws_key_name=' + attrs['key_name'], + 'aws_tenancy=' + attrs['tenancy']]) + groups.extend('aws_tag_%s=%s' % item for item in attrs['tags'].items()) + groups.extend('aws_vpc_security_group=' + group + for group in attrs['vpc_security_group_ids']) + groups.extend('aws_subnet_%s=%s' % subnet + for subnet in attrs['subnet'].items()) + + # groups specific to microservices-infrastructure + groups.append('role=' + attrs['role']) + groups.append('dc=' + attrs['consul_dc']) + + return name, attrs, groups + + @parses('google_compute_instance') @calculate_mi_vars def gce_host(resource, tfvars=None):
{"golden_diff": "diff --git a/plugins/inventory/terraform.py b/plugins/inventory/terraform.py\n--- a/plugins/inventory/terraform.py\n+++ b/plugins/inventory/terraform.py\n@@ -116,6 +116,17 @@\n return [value for _, value in _parse_prefix(source, prefix, sep)]\n \n \n+def parse_bool(string_form):\n+ token = string_form.lower()[0]\n+\n+ if token == 't':\n+ return True\n+ elif token == 'f':\n+ return False\n+ else:\n+ raise ValueError('could not convert %r to a bool' % string_form)\n+\n+\n @parses('openstack_compute_instance_v2')\n @calculate_mi_vars\n def openstack_host(resource, tfvars=None):\n@@ -169,6 +180,65 @@\n return name, attrs, groups\n \n \n+@parses('aws_instance')\n+@calculate_mi_vars\n+def aws_host(resource, tfvars=None):\n+ name = resource['primary']['attributes']['tags.Name']\n+ raw_attrs = resource['primary']['attributes']\n+\n+ groups = []\n+\n+ attrs = {\n+ 'ami': raw_attrs['ami'],\n+ 'availability_zone': raw_attrs['availability_zone'],\n+ 'ebs_block_device': parse_attr_list(raw_attrs, 'ebs_block_device'),\n+ 'ebs_optimized': parse_bool(raw_attrs['ebs_optimized']),\n+ 'ephemeral_block_device': parse_attr_list(raw_attrs,\n+ 'ephemeral_block_device'),\n+ 'id': raw_attrs['id'],\n+ 'key_name': raw_attrs['key_name'],\n+ 'private': parse_dict(raw_attrs, 'private',\n+ sep='_'),\n+ 'public': parse_dict(raw_attrs, 'public',\n+ sep='_'),\n+ 'root_block_device': parse_attr_list(raw_attrs, 'root_block_device'),\n+ 'security_groups': parse_attr_list(raw_attrs, 'security_groups'),\n+ 'subnet': parse_dict(raw_attrs, 'subnet',\n+ sep='_'),\n+ 'tags': parse_dict(raw_attrs, 'tags'),\n+ 'tenancy': raw_attrs['tenancy'],\n+ 'vpc_security_group_ids': parse_list(raw_attrs,\n+ 'vpc_security_group_ids'),\n+ # ansible-specific\n+ 'ansible_ssh_port': 22,\n+ 'ansible_ssh_user': raw_attrs['tags.sshUser'],\n+ 'ansible_ssh_host': raw_attrs['public_ip'],\n+ }\n+\n+ # attrs specific to microservices-infrastructure\n+ attrs.update({\n+ 'consul_dc': attrs['tags'].get('dc'),\n+ 'role': attrs['tags'].get('role', 'none')\n+ })\n+\n+ # groups specific to microservices-infrastructure\n+ groups.extend(['aws_ami=' + attrs['ami'],\n+ 'aws_az=' + attrs['availability_zone'],\n+ 'aws_key_name=' + attrs['key_name'],\n+ 'aws_tenancy=' + attrs['tenancy']])\n+ groups.extend('aws_tag_%s=%s' % item for item in attrs['tags'].items())\n+ groups.extend('aws_vpc_security_group=' + group\n+ for group in attrs['vpc_security_group_ids'])\n+ groups.extend('aws_subnet_%s=%s' % subnet\n+ for subnet in attrs['subnet'].items())\n+\n+ # groups specific to microservices-infrastructure\n+ groups.append('role=' + attrs['role'])\n+ groups.append('dc=' + attrs['consul_dc'])\n+\n+ return name, attrs, groups\n+\n+\n @parses('google_compute_instance')\n @calculate_mi_vars\n def gce_host(resource, tfvars=None):\n", "issue": "Use Consul DNS instead of .novalocal\nTemplatizing the hosts file to include `.novalocal` worked when we were only on OpenStack, but now that we have multiple providers we need a better way to do service discovery. Fortunately, we have Consul DNS, which will probably work.\n- [ ] change Mesos to use DNS name\n- [ ] change Marathon to use DNS name\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\\\nDynamic inventory for Terraform - finds all `.tfstate` files below the working\ndirectory and generates an inventory based on them.\n\"\"\"\nfrom __future__ import unicode_literals, print_function\nimport argparse\nfrom collections import defaultdict\nfrom functools import wraps\nimport json\nimport os\nimport re\n\n\ndef tfstates(root=None):\n root = root or os.getcwd()\n for dirpath, _, filenames in os.walk(root):\n for name in filenames:\n if os.path.splitext(name)[-1] == '.tfstate':\n yield os.path.join(dirpath, name)\n\n\ndef iterresources(filenames):\n for filename in filenames:\n with open(filename, 'r') as json_file:\n state = json.load(json_file)\n for module in state['modules']:\n for key, resource in module['resources'].items():\n yield key, resource\n\n## READ RESOURCES\nPARSERS = {}\n\n\ndef _clean_dc(dcname):\n # Consul DCs are strictly alphanumeric with underscores and hyphens -\n # ensure that the consul_dc attribute meets these requirements.\n return re.sub('[^\\w_\\-]', '-', dcname)\n\n\ndef iterhosts(resources):\n '''yield host tuples of (name, attributes, groups)'''\n for key, resource in resources:\n resource_type, name = key.split('.', 1)\n try:\n parser = PARSERS[resource_type]\n except KeyError:\n continue\n\n yield parser(resource)\n\n\ndef parses(prefix):\n def inner(func):\n PARSERS[prefix] = func\n return func\n\n return inner\n\n\ndef calculate_mi_vars(func):\n \"\"\"calculate microservices-infrastructure vars\"\"\"\n\n @wraps(func)\n def inner(*args, **kwargs):\n name, attrs, groups = func(*args, **kwargs)\n\n # attrs\n if attrs['role'] == 'control':\n attrs['consul_is_server'] = True\n elif attrs['role'] == 'worker':\n attrs['consul_is_server'] = False\n\n # groups\n if attrs.get('publicly_routable', False):\n groups.append('publicly_routable')\n\n return name, attrs, groups\n\n return inner\n\n\ndef _parse_prefix(source, prefix, sep='.'):\n for compkey, value in source.items():\n try:\n curprefix, rest = compkey.split(sep, 1)\n except ValueError:\n continue\n\n if curprefix != prefix or rest == '#':\n continue\n\n yield rest, value\n\n\ndef parse_attr_list(source, prefix, sep='.'):\n size_key = '%s%s#' % (prefix, sep)\n try:\n size = int(source[size_key])\n except KeyError:\n return []\n\n attrs = [{} for _ in range(size)]\n for compkey, value in _parse_prefix(source, prefix, sep):\n nth, key = compkey.split(sep, 1)\n attrs[int(nth)][key] = value\n\n return attrs\n\n\ndef parse_dict(source, prefix, sep='.'):\n return dict(_parse_prefix(source, prefix, sep))\n\n\ndef parse_list(source, prefix, sep='.'):\n return [value for _, value in _parse_prefix(source, prefix, sep)]\n\n\n@parses('openstack_compute_instance_v2')\n@calculate_mi_vars\ndef openstack_host(resource, tfvars=None):\n raw_attrs = resource['primary']['attributes']\n name = raw_attrs['name']\n groups = []\n\n attrs = {\n 'access_ip_v4': raw_attrs['access_ip_v4'],\n 'access_ip_v6': raw_attrs['access_ip_v6'],\n 'flavor': parse_dict(raw_attrs, 'flavor',\n sep='_'),\n 'id': raw_attrs['id'],\n 'image': parse_dict(raw_attrs, 'image',\n sep='_'),\n 'key_pair': raw_attrs['key_pair'],\n 'metadata': parse_dict(raw_attrs, 'metadata'),\n 'network': parse_attr_list(raw_attrs, 'network'),\n 'region': raw_attrs['region'],\n 'security_groups': parse_list(raw_attrs, 'security_groups'),\n #ansible\n 'ansible_ssh_port': 22,\n 'ansible_ssh_user': 'centos',\n }\n\n try:\n attrs.update({\n 'ansible_ssh_host': raw_attrs['access_ip_v4'],\n 'publicly_routable': True,\n })\n except (KeyError, ValueError):\n attrs.update({'ansible_ssh_host': '', 'publicly_routable': False, })\n\n # attrs specific to microservices-infrastructure\n attrs.update({\n 'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['region'])),\n 'role': attrs['metadata'].get('role', 'none')\n })\n\n # add groups based on attrs\n groups.append('os_image=' + attrs['image']['name'])\n groups.append('os_flavor=' + attrs['flavor']['name'])\n groups.extend('os_metadata_%s=%s' % item\n for item in attrs['metadata'].items())\n groups.append('os_region=' + attrs['region'])\n\n # groups specific to microservices-infrastructure\n groups.append('role=' + attrs['metadata'].get('role', 'none'))\n groups.append('dc=' + attrs['consul_dc'])\n\n return name, attrs, groups\n\n\n@parses('google_compute_instance')\n@calculate_mi_vars\ndef gce_host(resource, tfvars=None):\n name = resource['primary']['id']\n raw_attrs = resource['primary']['attributes']\n groups = []\n\n # network interfaces\n interfaces = parse_attr_list(raw_attrs, 'network_interface')\n for interface in interfaces:\n interface['access_config'] = parse_attr_list(interface,\n 'access_config')\n for key in interface.keys():\n if '.' in key:\n del interface[key]\n\n # general attrs\n attrs = {\n 'can_ip_forward': raw_attrs['can_ip_forward'] == 'true',\n 'disks': parse_attr_list(raw_attrs, 'disk'),\n 'machine_type': raw_attrs['machine_type'],\n 'metadata': parse_dict(raw_attrs, 'metadata'),\n 'network': parse_attr_list(raw_attrs, 'network'),\n 'network_interface': interfaces,\n 'self_link': raw_attrs['self_link'],\n 'service_account': parse_attr_list(raw_attrs, 'service_account'),\n 'tags': parse_list(raw_attrs, 'tags'),\n 'zone': raw_attrs['zone'],\n # ansible\n 'ansible_ssh_port': 22,\n 'ansible_ssh_user': 'deploy',\n }\n\n # attrs specific to microservices-infrastructure\n attrs.update({\n 'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['zone'])),\n 'role': attrs['metadata'].get('role', 'none')\n })\n\n try:\n attrs.update({\n 'ansible_ssh_host': interfaces[0]['access_config'][0]['nat_ip'],\n 'publicly_routable': True,\n })\n except (KeyError, ValueError):\n attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})\n\n # add groups based on attrs\n groups.extend('gce_image=' + disk['image'] for disk in attrs['disks'])\n groups.append('gce_machine_type=' + attrs['machine_type'])\n groups.extend('gce_metadata_%s=%s' % (key, value)\n for (key, value) in attrs['metadata'].items()\n if key not in set(['sshKeys']))\n groups.extend('gce_tag=' + tag for tag in attrs['tags'])\n groups.append('gce_zone=' + attrs['zone'])\n\n if attrs['can_ip_forward']:\n groups.append('gce_ip_forward')\n if attrs['publicly_routable']:\n groups.append('gce_publicly_routable')\n\n # groups specific to microservices-infrastructure\n groups.append('role=' + attrs['metadata'].get('role', 'none'))\n groups.append('dc=' + attrs['consul_dc'])\n\n return name, attrs, groups\n\n\n## QUERY TYPES\ndef query_host(hosts, target):\n for name, attrs, _ in hosts:\n if name == target:\n return attrs\n\n return {}\n\n\ndef query_list(hosts):\n groups = defaultdict(dict)\n meta = {}\n\n for name, attrs, hostgroups in hosts:\n for group in set(hostgroups):\n groups[group].setdefault('hosts', [])\n groups[group]['hosts'].append(name)\n\n meta[name] = attrs\n\n groups['_meta'] = {'hostvars': meta}\n return groups\n\n\ndef main():\n\n parser = argparse.ArgumentParser(__file__, __doc__)\n modes = parser.add_mutually_exclusive_group(required=True)\n modes.add_argument('--list',\n action='store_true',\n help='list all variables')\n modes.add_argument('--host', help='list variables for a single host')\n parser.add_argument('--pretty',\n action='store_true',\n help='pretty-print output JSON')\n parser.add_argument('--nometa',\n action='store_true',\n help='with --list, exclude hostvars')\n\n args = parser.parse_args()\n\n hosts = iterhosts(iterresources(tfstates()))\n if args.list:\n output = query_list(hosts)\n if args.nometa:\n del output['_meta']\n else:\n output = query_host(hosts, args.host)\n\n print(json.dumps(output, indent=4 if args.pretty else None))\n parser.exit()\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/inventory/terraform.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"\\\nDynamic inventory for Terraform - finds all `.tfstate` files below the working\ndirectory and generates an inventory based on them.\n\"\"\"\nfrom __future__ import unicode_literals, print_function\nimport argparse\nfrom collections import defaultdict\nfrom functools import wraps\nimport json\nimport os\nimport re\n\n\ndef tfstates(root=None):\n root = root or os.getcwd()\n for dirpath, _, filenames in os.walk(root):\n for name in filenames:\n if os.path.splitext(name)[-1] == '.tfstate':\n yield os.path.join(dirpath, name)\n\n\ndef iterresources(filenames):\n for filename in filenames:\n with open(filename, 'r') as json_file:\n state = json.load(json_file)\n for module in state['modules']:\n for key, resource in module['resources'].items():\n yield key, resource\n\n## READ RESOURCES\nPARSERS = {}\n\n\ndef _clean_dc(dcname):\n # Consul DCs are strictly alphanumeric with underscores and hyphens -\n # ensure that the consul_dc attribute meets these requirements.\n return re.sub('[^\\w_\\-]', '-', dcname)\n\n\ndef iterhosts(resources):\n '''yield host tuples of (name, attributes, groups)'''\n for key, resource in resources:\n resource_type, name = key.split('.', 1)\n try:\n parser = PARSERS[resource_type]\n except KeyError:\n continue\n\n yield parser(resource)\n\n\ndef parses(prefix):\n def inner(func):\n PARSERS[prefix] = func\n return func\n\n return inner\n\n\ndef calculate_mi_vars(func):\n \"\"\"calculate microservices-infrastructure vars\"\"\"\n\n @wraps(func)\n def inner(*args, **kwargs):\n name, attrs, groups = func(*args, **kwargs)\n\n # attrs\n if attrs['role'] == 'control':\n attrs['consul_is_server'] = True\n elif attrs['role'] == 'worker':\n attrs['consul_is_server'] = False\n\n # groups\n if attrs.get('publicly_routable', False):\n groups.append('publicly_routable')\n\n return name, attrs, groups\n\n return inner\n\n\ndef _parse_prefix(source, prefix, sep='.'):\n for compkey, value in source.items():\n try:\n curprefix, rest = compkey.split(sep, 1)\n except ValueError:\n continue\n\n if curprefix != prefix or rest == '#':\n continue\n\n yield rest, value\n\n\ndef parse_attr_list(source, prefix, sep='.'):\n size_key = '%s%s#' % (prefix, sep)\n try:\n size = int(source[size_key])\n except KeyError:\n return []\n\n attrs = [{} for _ in range(size)]\n for compkey, value in _parse_prefix(source, prefix, sep):\n nth, key = compkey.split(sep, 1)\n attrs[int(nth)][key] = value\n\n return attrs\n\n\ndef parse_dict(source, prefix, sep='.'):\n return dict(_parse_prefix(source, prefix, sep))\n\n\ndef parse_list(source, prefix, sep='.'):\n return [value for _, value in _parse_prefix(source, prefix, sep)]\n\n\ndef parse_bool(string_form):\n token = string_form.lower()[0]\n\n if token == 't':\n return True\n elif token == 'f':\n return False\n else:\n raise ValueError('could not convert %r to a bool' % string_form)\n\n\n@parses('openstack_compute_instance_v2')\n@calculate_mi_vars\ndef openstack_host(resource, tfvars=None):\n raw_attrs = resource['primary']['attributes']\n name = raw_attrs['name']\n groups = []\n\n attrs = {\n 'access_ip_v4': raw_attrs['access_ip_v4'],\n 'access_ip_v6': raw_attrs['access_ip_v6'],\n 'flavor': parse_dict(raw_attrs, 'flavor',\n sep='_'),\n 'id': raw_attrs['id'],\n 'image': parse_dict(raw_attrs, 'image',\n sep='_'),\n 'key_pair': raw_attrs['key_pair'],\n 'metadata': parse_dict(raw_attrs, 'metadata'),\n 'network': parse_attr_list(raw_attrs, 'network'),\n 'region': raw_attrs['region'],\n 'security_groups': parse_list(raw_attrs, 'security_groups'),\n #ansible\n 'ansible_ssh_port': 22,\n 'ansible_ssh_user': 'centos',\n }\n\n try:\n attrs.update({\n 'ansible_ssh_host': raw_attrs['access_ip_v4'],\n 'publicly_routable': True,\n })\n except (KeyError, ValueError):\n attrs.update({'ansible_ssh_host': '', 'publicly_routable': False, })\n\n # attrs specific to microservices-infrastructure\n attrs.update({\n 'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['region'])),\n 'role': attrs['metadata'].get('role', 'none')\n })\n\n # add groups based on attrs\n groups.append('os_image=' + attrs['image']['name'])\n groups.append('os_flavor=' + attrs['flavor']['name'])\n groups.extend('os_metadata_%s=%s' % item\n for item in attrs['metadata'].items())\n groups.append('os_region=' + attrs['region'])\n\n # groups specific to microservices-infrastructure\n groups.append('role=' + attrs['metadata'].get('role', 'none'))\n groups.append('dc=' + attrs['consul_dc'])\n\n return name, attrs, groups\n\n\n@parses('aws_instance')\n@calculate_mi_vars\ndef aws_host(resource, tfvars=None):\n name = resource['primary']['attributes']['tags.Name']\n raw_attrs = resource['primary']['attributes']\n\n groups = []\n\n attrs = {\n 'ami': raw_attrs['ami'],\n 'availability_zone': raw_attrs['availability_zone'],\n 'ebs_block_device': parse_attr_list(raw_attrs, 'ebs_block_device'),\n 'ebs_optimized': parse_bool(raw_attrs['ebs_optimized']),\n 'ephemeral_block_device': parse_attr_list(raw_attrs,\n 'ephemeral_block_device'),\n 'id': raw_attrs['id'],\n 'key_name': raw_attrs['key_name'],\n 'private': parse_dict(raw_attrs, 'private',\n sep='_'),\n 'public': parse_dict(raw_attrs, 'public',\n sep='_'),\n 'root_block_device': parse_attr_list(raw_attrs, 'root_block_device'),\n 'security_groups': parse_attr_list(raw_attrs, 'security_groups'),\n 'subnet': parse_dict(raw_attrs, 'subnet',\n sep='_'),\n 'tags': parse_dict(raw_attrs, 'tags'),\n 'tenancy': raw_attrs['tenancy'],\n 'vpc_security_group_ids': parse_list(raw_attrs,\n 'vpc_security_group_ids'),\n # ansible-specific\n 'ansible_ssh_port': 22,\n 'ansible_ssh_user': raw_attrs['tags.sshUser'],\n 'ansible_ssh_host': raw_attrs['public_ip'],\n }\n\n # attrs specific to microservices-infrastructure\n attrs.update({\n 'consul_dc': attrs['tags'].get('dc'),\n 'role': attrs['tags'].get('role', 'none')\n })\n\n # groups specific to microservices-infrastructure\n groups.extend(['aws_ami=' + attrs['ami'],\n 'aws_az=' + attrs['availability_zone'],\n 'aws_key_name=' + attrs['key_name'],\n 'aws_tenancy=' + attrs['tenancy']])\n groups.extend('aws_tag_%s=%s' % item for item in attrs['tags'].items())\n groups.extend('aws_vpc_security_group=' + group\n for group in attrs['vpc_security_group_ids'])\n groups.extend('aws_subnet_%s=%s' % subnet\n for subnet in attrs['subnet'].items())\n\n # groups specific to microservices-infrastructure\n groups.append('role=' + attrs['role'])\n groups.append('dc=' + attrs['consul_dc'])\n\n return name, attrs, groups\n\n\n@parses('google_compute_instance')\n@calculate_mi_vars\ndef gce_host(resource, tfvars=None):\n name = resource['primary']['id']\n raw_attrs = resource['primary']['attributes']\n groups = []\n\n # network interfaces\n interfaces = parse_attr_list(raw_attrs, 'network_interface')\n for interface in interfaces:\n interface['access_config'] = parse_attr_list(interface,\n 'access_config')\n for key in interface.keys():\n if '.' in key:\n del interface[key]\n\n # general attrs\n attrs = {\n 'can_ip_forward': raw_attrs['can_ip_forward'] == 'true',\n 'disks': parse_attr_list(raw_attrs, 'disk'),\n 'machine_type': raw_attrs['machine_type'],\n 'metadata': parse_dict(raw_attrs, 'metadata'),\n 'network': parse_attr_list(raw_attrs, 'network'),\n 'network_interface': interfaces,\n 'self_link': raw_attrs['self_link'],\n 'service_account': parse_attr_list(raw_attrs, 'service_account'),\n 'tags': parse_list(raw_attrs, 'tags'),\n 'zone': raw_attrs['zone'],\n # ansible\n 'ansible_ssh_port': 22,\n 'ansible_ssh_user': 'deploy',\n }\n\n # attrs specific to microservices-infrastructure\n attrs.update({\n 'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['zone'])),\n 'role': attrs['metadata'].get('role', 'none')\n })\n\n try:\n attrs.update({\n 'ansible_ssh_host': interfaces[0]['access_config'][0]['nat_ip'],\n 'publicly_routable': True,\n })\n except (KeyError, ValueError):\n attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})\n\n # add groups based on attrs\n groups.extend('gce_image=' + disk['image'] for disk in attrs['disks'])\n groups.append('gce_machine_type=' + attrs['machine_type'])\n groups.extend('gce_metadata_%s=%s' % (key, value)\n for (key, value) in attrs['metadata'].items()\n if key not in set(['sshKeys']))\n groups.extend('gce_tag=' + tag for tag in attrs['tags'])\n groups.append('gce_zone=' + attrs['zone'])\n\n if attrs['can_ip_forward']:\n groups.append('gce_ip_forward')\n if attrs['publicly_routable']:\n groups.append('gce_publicly_routable')\n\n # groups specific to microservices-infrastructure\n groups.append('role=' + attrs['metadata'].get('role', 'none'))\n groups.append('dc=' + attrs['consul_dc'])\n\n return name, attrs, groups\n\n\n## QUERY TYPES\ndef query_host(hosts, target):\n for name, attrs, _ in hosts:\n if name == target:\n return attrs\n\n return {}\n\n\ndef query_list(hosts):\n groups = defaultdict(dict)\n meta = {}\n\n for name, attrs, hostgroups in hosts:\n for group in set(hostgroups):\n groups[group].setdefault('hosts', [])\n groups[group]['hosts'].append(name)\n\n meta[name] = attrs\n\n groups['_meta'] = {'hostvars': meta}\n return groups\n\n\ndef main():\n\n parser = argparse.ArgumentParser(__file__, __doc__)\n modes = parser.add_mutually_exclusive_group(required=True)\n modes.add_argument('--list',\n action='store_true',\n help='list all variables')\n modes.add_argument('--host', help='list variables for a single host')\n parser.add_argument('--pretty',\n action='store_true',\n help='pretty-print output JSON')\n parser.add_argument('--nometa',\n action='store_true',\n help='with --list, exclude hostvars')\n\n args = parser.parse_args()\n\n hosts = iterhosts(iterresources(tfstates()))\n if args.list:\n output = query_list(hosts)\n if args.nometa:\n del output['_meta']\n else:\n output = query_host(hosts, args.host)\n\n print(json.dumps(output, indent=4 if args.pretty else None))\n parser.exit()\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/inventory/terraform.py"}]}
3,196
801
gh_patches_debug_24701
rasdani/github-patches
git_diff
ietf-tools__datatracker-6328
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Migrate DocHistory objects for RFCs ### Describe the issue `DocHistory` objects that pertain to the RFC rather than the I-D need to be migrated to hang off the new `type_id="rfc"` `Document`. The `name` and `rfc_number` fields should be updated to match. ### Code of Conduct - [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ietf/doc/migrations/0017_move_dochistory.py` Content: ``` 1 # Generated by Django 4.2.5 on 2023-09-11 17:52 2 3 from django.db import migrations 4 5 from django.db.models import Subquery, OuterRef, F 6 7 8 def forward(apps, schema_editor): 9 DocHistory = apps.get_model("doc", "DocHistory") 10 RelatedDocument = apps.get_model("doc", "RelatedDocument") 11 DocHistory.objects.filter(type_id="draft", doc__type_id="rfc").update(type_id="rfc") 12 DocHistory.objects.filter( 13 type_id="draft", doc__type_id="draft", name__startswith="rfc" 14 ).annotate( 15 rfc_id=Subquery( 16 RelatedDocument.objects.filter( 17 source_id=OuterRef("doc_id"), relationship_id="became_rfc" 18 ).values_list("target_id", flat=True)[:1] 19 ) 20 ).update( 21 doc_id=F("rfc_id"), type_id="rfc" 22 ) 23 assert not DocHistory.objects.filter( 24 name__startswith="rfc", type_id="draft" 25 ).exists() 26 27 28 class Migration(migrations.Migration): 29 dependencies = [ 30 ("doc", "0016_delete_docalias"), 31 ] 32 33 # There is no going back 34 operations = [migrations.RunPython(forward)] 35 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ietf/doc/migrations/0017_move_dochistory.py b/ietf/doc/migrations/0017_move_dochistory.py --- a/ietf/doc/migrations/0017_move_dochistory.py +++ b/ietf/doc/migrations/0017_move_dochistory.py @@ -8,6 +8,7 @@ def forward(apps, schema_editor): DocHistory = apps.get_model("doc", "DocHistory") RelatedDocument = apps.get_model("doc", "RelatedDocument") + Document = apps.get_model("doc", "Document") DocHistory.objects.filter(type_id="draft", doc__type_id="rfc").update(type_id="rfc") DocHistory.objects.filter( type_id="draft", doc__type_id="draft", name__startswith="rfc" @@ -20,9 +21,19 @@ ).update( doc_id=F("rfc_id"), type_id="rfc" ) + DocHistory.objects.filter(type_id="rfc").annotate( + rfcno=Subquery( + Document.objects.filter(pk=OuterRef("doc_id")).values_list( + "rfc_number", flat=True + )[:1] + ) + ).update(rfc_number=F("rfcno")) assert not DocHistory.objects.filter( name__startswith="rfc", type_id="draft" ).exists() + assert not DocHistory.objects.filter( + type_id="rfc", rfc_number__isnull=True + ).exists() class Migration(migrations.Migration):
{"golden_diff": "diff --git a/ietf/doc/migrations/0017_move_dochistory.py b/ietf/doc/migrations/0017_move_dochistory.py\n--- a/ietf/doc/migrations/0017_move_dochistory.py\n+++ b/ietf/doc/migrations/0017_move_dochistory.py\n@@ -8,6 +8,7 @@\n def forward(apps, schema_editor):\n DocHistory = apps.get_model(\"doc\", \"DocHistory\")\n RelatedDocument = apps.get_model(\"doc\", \"RelatedDocument\")\n+ Document = apps.get_model(\"doc\", \"Document\")\n DocHistory.objects.filter(type_id=\"draft\", doc__type_id=\"rfc\").update(type_id=\"rfc\")\n DocHistory.objects.filter(\n type_id=\"draft\", doc__type_id=\"draft\", name__startswith=\"rfc\"\n@@ -20,9 +21,19 @@\n ).update(\n doc_id=F(\"rfc_id\"), type_id=\"rfc\"\n )\n+ DocHistory.objects.filter(type_id=\"rfc\").annotate(\n+ rfcno=Subquery(\n+ Document.objects.filter(pk=OuterRef(\"doc_id\")).values_list(\n+ \"rfc_number\", flat=True\n+ )[:1]\n+ )\n+ ).update(rfc_number=F(\"rfcno\"))\n assert not DocHistory.objects.filter(\n name__startswith=\"rfc\", type_id=\"draft\"\n ).exists()\n+ assert not DocHistory.objects.filter(\n+ type_id=\"rfc\", rfc_number__isnull=True\n+ ).exists()\n \n \n class Migration(migrations.Migration):\n", "issue": "Migrate DocHistory objects for RFCs\n### Describe the issue\n\n`DocHistory` objects that pertain to the RFC rather than the I-D need to be migrated to hang off the new `type_id=\"rfc\"` `Document`. The `name` and `rfc_number` fields should be updated to match.\n\n### Code of Conduct\n\n- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)\n", "before_files": [{"content": "# Generated by Django 4.2.5 on 2023-09-11 17:52\n\nfrom django.db import migrations\n\nfrom django.db.models import Subquery, OuterRef, F\n\n\ndef forward(apps, schema_editor):\n DocHistory = apps.get_model(\"doc\", \"DocHistory\")\n RelatedDocument = apps.get_model(\"doc\", \"RelatedDocument\")\n DocHistory.objects.filter(type_id=\"draft\", doc__type_id=\"rfc\").update(type_id=\"rfc\")\n DocHistory.objects.filter(\n type_id=\"draft\", doc__type_id=\"draft\", name__startswith=\"rfc\"\n ).annotate(\n rfc_id=Subquery(\n RelatedDocument.objects.filter(\n source_id=OuterRef(\"doc_id\"), relationship_id=\"became_rfc\"\n ).values_list(\"target_id\", flat=True)[:1]\n )\n ).update(\n doc_id=F(\"rfc_id\"), type_id=\"rfc\"\n )\n assert not DocHistory.objects.filter(\n name__startswith=\"rfc\", type_id=\"draft\"\n ).exists()\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n (\"doc\", \"0016_delete_docalias\"),\n ]\n\n # There is no going back\n operations = [migrations.RunPython(forward)]\n", "path": "ietf/doc/migrations/0017_move_dochistory.py"}], "after_files": [{"content": "# Generated by Django 4.2.5 on 2023-09-11 17:52\n\nfrom django.db import migrations\n\nfrom django.db.models import Subquery, OuterRef, F\n\n\ndef forward(apps, schema_editor):\n DocHistory = apps.get_model(\"doc\", \"DocHistory\")\n RelatedDocument = apps.get_model(\"doc\", \"RelatedDocument\")\n Document = apps.get_model(\"doc\", \"Document\")\n DocHistory.objects.filter(type_id=\"draft\", doc__type_id=\"rfc\").update(type_id=\"rfc\")\n DocHistory.objects.filter(\n type_id=\"draft\", doc__type_id=\"draft\", name__startswith=\"rfc\"\n ).annotate(\n rfc_id=Subquery(\n RelatedDocument.objects.filter(\n source_id=OuterRef(\"doc_id\"), relationship_id=\"became_rfc\"\n ).values_list(\"target_id\", flat=True)[:1]\n )\n ).update(\n doc_id=F(\"rfc_id\"), type_id=\"rfc\"\n )\n DocHistory.objects.filter(type_id=\"rfc\").annotate(\n rfcno=Subquery(\n Document.objects.filter(pk=OuterRef(\"doc_id\")).values_list(\n \"rfc_number\", flat=True\n )[:1]\n )\n ).update(rfc_number=F(\"rfcno\"))\n assert not DocHistory.objects.filter(\n name__startswith=\"rfc\", type_id=\"draft\"\n ).exists()\n assert not DocHistory.objects.filter(\n type_id=\"rfc\", rfc_number__isnull=True\n ).exists()\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n (\"doc\", \"0016_delete_docalias\"),\n ]\n\n # There is no going back\n operations = [migrations.RunPython(forward)]\n", "path": "ietf/doc/migrations/0017_move_dochistory.py"}]}
720
350
gh_patches_debug_11776
rasdani/github-patches
git_diff
bookwyrm-social__bookwyrm-273
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- It's possible to create a FollowRequest for a Follow that already exists Send a follow request. User accepts. Send a second follow request (eg. using an old tab) Because requests and follows are stored in separate tables it's not possible to enforce this with a constraint. Pre-checking would leave a race condition, so I guess we either need to mess with table locks or create the follow request, and then delete it if its unneeded. Can we enforce this in the pre save of the model, perhaps? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bookwyrm/models/relationship.py` Content: ``` 1 ''' defines relationships between users ''' 2 from django.db import models 3 4 from bookwyrm import activitypub 5 from .base_model import ActivitypubMixin, ActivityMapping, BookWyrmModel 6 7 8 class UserRelationship(ActivitypubMixin, BookWyrmModel): 9 ''' many-to-many through table for followers ''' 10 user_subject = models.ForeignKey( 11 'User', 12 on_delete=models.PROTECT, 13 related_name='%(class)s_user_subject' 14 ) 15 user_object = models.ForeignKey( 16 'User', 17 on_delete=models.PROTECT, 18 related_name='%(class)s_user_object' 19 ) 20 21 class Meta: 22 ''' relationships should be unique ''' 23 abstract = True 24 constraints = [ 25 models.UniqueConstraint( 26 fields=['user_subject', 'user_object'], 27 name='%(class)s_unique' 28 ), 29 models.CheckConstraint( 30 check=~models.Q(user_subject=models.F('user_object')), 31 name='%(class)s_no_self' 32 ) 33 ] 34 35 activity_mappings = [ 36 ActivityMapping('id', 'remote_id'), 37 ActivityMapping('actor', 'user_subject'), 38 ActivityMapping('object', 'user_object'), 39 ] 40 activity_serializer = activitypub.Follow 41 42 def get_remote_id(self, status=None): 43 ''' use shelf identifier in remote_id ''' 44 status = status or 'follows' 45 base_path = self.user_subject.remote_id 46 return '%s#%s/%d' % (base_path, status, self.id) 47 48 49 def to_accept_activity(self): 50 ''' generate an Accept for this follow request ''' 51 return activitypub.Accept( 52 id=self.get_remote_id(status='accepts'), 53 actor=self.user_object.remote_id, 54 object=self.to_activity() 55 ).serialize() 56 57 58 def to_reject_activity(self): 59 ''' generate an Accept for this follow request ''' 60 return activitypub.Reject( 61 id=self.get_remote_id(status='rejects'), 62 actor=self.user_object.remote_id, 63 object=self.to_activity() 64 ).serialize() 65 66 67 class UserFollows(UserRelationship): 68 ''' Following a user ''' 69 status = 'follows' 70 71 @classmethod 72 def from_request(cls, follow_request): 73 ''' converts a follow request into a follow relationship ''' 74 return cls( 75 user_subject=follow_request.user_subject, 76 user_object=follow_request.user_object, 77 remote_id=follow_request.remote_id, 78 ) 79 80 81 class UserFollowRequest(UserRelationship): 82 ''' following a user requires manual or automatic confirmation ''' 83 status = 'follow_request' 84 85 86 class UserBlocks(UserRelationship): 87 ''' prevent another user from following you and seeing your posts ''' 88 # TODO: not implemented 89 status = 'blocks' 90 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bookwyrm/models/relationship.py b/bookwyrm/models/relationship.py --- a/bookwyrm/models/relationship.py +++ b/bookwyrm/models/relationship.py @@ -82,6 +82,17 @@ ''' following a user requires manual or automatic confirmation ''' status = 'follow_request' + def save(self, *args, **kwargs): + ''' make sure the follow relationship doesn't already exist ''' + try: + UserFollows.objects.get( + user_subject=self.user_subject, + user_object=self.user_object + ) + return None + except UserFollows.DoesNotExist: + return super().save(*args, **kwargs) + class UserBlocks(UserRelationship): ''' prevent another user from following you and seeing your posts '''
{"golden_diff": "diff --git a/bookwyrm/models/relationship.py b/bookwyrm/models/relationship.py\n--- a/bookwyrm/models/relationship.py\n+++ b/bookwyrm/models/relationship.py\n@@ -82,6 +82,17 @@\n ''' following a user requires manual or automatic confirmation '''\n status = 'follow_request'\n \n+ def save(self, *args, **kwargs):\n+ ''' make sure the follow relationship doesn't already exist '''\n+ try:\n+ UserFollows.objects.get(\n+ user_subject=self.user_subject,\n+ user_object=self.user_object\n+ )\n+ return None\n+ except UserFollows.DoesNotExist:\n+ return super().save(*args, **kwargs)\n+\n \n class UserBlocks(UserRelationship):\n ''' prevent another user from following you and seeing your posts '''\n", "issue": "It's possible to create a FollowRequest for a Follow that already exists\nSend a follow request.\r\nUser accepts.\r\nSend a second follow request (eg. using an old tab)\r\n\r\nBecause requests and follows are stored in separate tables it's not possible to enforce this with a constraint. Pre-checking would leave a race condition, so I guess we either need to mess with table locks or create the follow request, and then delete it if its unneeded. Can we enforce this in the pre save of the model, perhaps?\n", "before_files": [{"content": "''' defines relationships between users '''\nfrom django.db import models\n\nfrom bookwyrm import activitypub\nfrom .base_model import ActivitypubMixin, ActivityMapping, BookWyrmModel\n\n\nclass UserRelationship(ActivitypubMixin, BookWyrmModel):\n ''' many-to-many through table for followers '''\n user_subject = models.ForeignKey(\n 'User',\n on_delete=models.PROTECT,\n related_name='%(class)s_user_subject'\n )\n user_object = models.ForeignKey(\n 'User',\n on_delete=models.PROTECT,\n related_name='%(class)s_user_object'\n )\n\n class Meta:\n ''' relationships should be unique '''\n abstract = True\n constraints = [\n models.UniqueConstraint(\n fields=['user_subject', 'user_object'],\n name='%(class)s_unique'\n ),\n models.CheckConstraint(\n check=~models.Q(user_subject=models.F('user_object')),\n name='%(class)s_no_self'\n )\n ]\n\n activity_mappings = [\n ActivityMapping('id', 'remote_id'),\n ActivityMapping('actor', 'user_subject'),\n ActivityMapping('object', 'user_object'),\n ]\n activity_serializer = activitypub.Follow\n\n def get_remote_id(self, status=None):\n ''' use shelf identifier in remote_id '''\n status = status or 'follows'\n base_path = self.user_subject.remote_id\n return '%s#%s/%d' % (base_path, status, self.id)\n\n\n def to_accept_activity(self):\n ''' generate an Accept for this follow request '''\n return activitypub.Accept(\n id=self.get_remote_id(status='accepts'),\n actor=self.user_object.remote_id,\n object=self.to_activity()\n ).serialize()\n\n\n def to_reject_activity(self):\n ''' generate an Accept for this follow request '''\n return activitypub.Reject(\n id=self.get_remote_id(status='rejects'),\n actor=self.user_object.remote_id,\n object=self.to_activity()\n ).serialize()\n\n\nclass UserFollows(UserRelationship):\n ''' Following a user '''\n status = 'follows'\n\n @classmethod\n def from_request(cls, follow_request):\n ''' converts a follow request into a follow relationship '''\n return cls(\n user_subject=follow_request.user_subject,\n user_object=follow_request.user_object,\n remote_id=follow_request.remote_id,\n )\n\n\nclass UserFollowRequest(UserRelationship):\n ''' following a user requires manual or automatic confirmation '''\n status = 'follow_request'\n\n\nclass UserBlocks(UserRelationship):\n ''' prevent another user from following you and seeing your posts '''\n # TODO: not implemented\n status = 'blocks'\n", "path": "bookwyrm/models/relationship.py"}], "after_files": [{"content": "''' defines relationships between users '''\nfrom django.db import models\n\nfrom bookwyrm import activitypub\nfrom .base_model import ActivitypubMixin, ActivityMapping, BookWyrmModel\n\n\nclass UserRelationship(ActivitypubMixin, BookWyrmModel):\n ''' many-to-many through table for followers '''\n user_subject = models.ForeignKey(\n 'User',\n on_delete=models.PROTECT,\n related_name='%(class)s_user_subject'\n )\n user_object = models.ForeignKey(\n 'User',\n on_delete=models.PROTECT,\n related_name='%(class)s_user_object'\n )\n\n class Meta:\n ''' relationships should be unique '''\n abstract = True\n constraints = [\n models.UniqueConstraint(\n fields=['user_subject', 'user_object'],\n name='%(class)s_unique'\n ),\n models.CheckConstraint(\n check=~models.Q(user_subject=models.F('user_object')),\n name='%(class)s_no_self'\n )\n ]\n\n activity_mappings = [\n ActivityMapping('id', 'remote_id'),\n ActivityMapping('actor', 'user_subject'),\n ActivityMapping('object', 'user_object'),\n ]\n activity_serializer = activitypub.Follow\n\n def get_remote_id(self, status=None):\n ''' use shelf identifier in remote_id '''\n status = status or 'follows'\n base_path = self.user_subject.remote_id\n return '%s#%s/%d' % (base_path, status, self.id)\n\n\n def to_accept_activity(self):\n ''' generate an Accept for this follow request '''\n return activitypub.Accept(\n id=self.get_remote_id(status='accepts'),\n actor=self.user_object.remote_id,\n object=self.to_activity()\n ).serialize()\n\n\n def to_reject_activity(self):\n ''' generate an Accept for this follow request '''\n return activitypub.Reject(\n id=self.get_remote_id(status='rejects'),\n actor=self.user_object.remote_id,\n object=self.to_activity()\n ).serialize()\n\n\nclass UserFollows(UserRelationship):\n ''' Following a user '''\n status = 'follows'\n\n @classmethod\n def from_request(cls, follow_request):\n ''' converts a follow request into a follow relationship '''\n return cls(\n user_subject=follow_request.user_subject,\n user_object=follow_request.user_object,\n remote_id=follow_request.remote_id,\n )\n\n\nclass UserFollowRequest(UserRelationship):\n ''' following a user requires manual or automatic confirmation '''\n status = 'follow_request'\n\n def save(self, *args, **kwargs):\n ''' make sure the follow relationship doesn't already exist '''\n try:\n UserFollows.objects.get(\n user_subject=self.user_subject,\n user_object=self.user_object\n )\n return None\n except UserFollows.DoesNotExist:\n return super().save(*args, **kwargs)\n\n\nclass UserBlocks(UserRelationship):\n ''' prevent another user from following you and seeing your posts '''\n # TODO: not implemented\n status = 'blocks'\n", "path": "bookwyrm/models/relationship.py"}]}
1,093
174
gh_patches_debug_3212
rasdani/github-patches
git_diff
scikit-image__scikit-image-6007
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- installation issues with setuptools 58.5.x ## Description I observed some failures on CI in a recent PR that appear to be related to use of setuptools >= 58.5.0. Example: https://github.com/scikit-image/scikit-image/runs/4104921276?check_suite_focus=true I was able to reproduce the issue locally with 58.5.0, but it does not occur for 58.4.0. A temporary workaround would be to pin setuptools <58.5 on CI. ## Way to reproduce build from source with setuptools >= 58.5 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #! /usr/bin/env python 2 3 import os 4 import sys 5 import tempfile 6 import shutil 7 import builtins 8 import textwrap 9 10 import setuptools 11 from distutils.command.build_py import build_py 12 from distutils.command.sdist import sdist 13 from distutils.errors import CompileError, LinkError 14 from numpy.distutils.command.build_ext import build_ext 15 16 17 DISTNAME = 'scikit-image' 18 DESCRIPTION = 'Image processing in Python' 19 MAINTAINER = 'Stefan van der Walt' 20 MAINTAINER_EMAIL = '[email protected]' 21 URL = 'https://scikit-image.org' 22 LICENSE = 'Modified BSD' 23 DOWNLOAD_URL = 'https://scikit-image.org/docs/stable/install.html' 24 PROJECT_URLS = { 25 "Bug Tracker": 'https://github.com/scikit-image/scikit-image/issues', 26 "Documentation": 'https://scikit-image.org/docs/stable/', 27 "Source Code": 'https://github.com/scikit-image/scikit-image' 28 } 29 30 with open('README.md', encoding='utf-8') as f: 31 LONG_DESCRIPTION = f.read() 32 33 if sys.version_info < (3, 6): 34 35 error = """Python {py} detected. 36 37 scikit-image 0.18+ supports only Python 3.7 and above. 38 39 For Python 2.7, please install the 0.14.x Long Term Support release using: 40 41 $ pip install 'scikit-image<0.15' 42 """.format(py='.'.join([str(v) for v in sys.version_info[:3]])) 43 44 sys.stderr.write(error + "\n") 45 sys.exit(1) 46 47 # This is a bit (!) hackish: we are setting a global variable so that the main 48 # skimage __init__ can detect if it is being loaded by the setup routine, to 49 # avoid attempting to load components that aren't built yet: 50 # the numpy distutils extensions that are used by scikit-image to recursively 51 # build the compiled extensions in sub-packages is based on the Python import 52 # machinery. 53 builtins.__SKIMAGE_SETUP__ = True 54 55 56 # Support for openmp 57 58 class ConditionalOpenMP(build_ext): 59 60 def can_compile_link(self, compile_flags, link_flags): 61 62 if "PYODIDE_PACKAGE_ABI" in os.environ: 63 # pyodide doesn't support OpenMP 64 return False 65 66 cc = self.compiler 67 fname = 'test.c' 68 cwd = os.getcwd() 69 tmpdir = tempfile.mkdtemp() 70 71 code = ("#include <omp.h>" 72 "int main(int argc, char** argv) { return(0); }") 73 74 if self.compiler.compiler_type == "msvc": 75 # make sure we build a DLL on Windows 76 local_link_flags = link_flags + ["/DLL"] 77 else: 78 local_link_flags = link_flags 79 80 try: 81 os.chdir(tmpdir) 82 with open(fname, 'wt') as fobj: 83 fobj.write(code) 84 try: 85 objects = cc.compile([fname], 86 extra_postargs=compile_flags) 87 except CompileError: 88 return False 89 try: 90 # Link shared lib rather then executable to avoid 91 # http://bugs.python.org/issue4431 with MSVC 10+ 92 cc.link_shared_lib(objects, "testlib", 93 extra_postargs=local_link_flags) 94 except (LinkError, TypeError): 95 return False 96 finally: 97 os.chdir(cwd) 98 shutil.rmtree(tmpdir) 99 return True 100 101 def build_extensions(self): 102 """ Hook into extension building to set compiler flags """ 103 104 compile_flags = list() 105 link_flags = list() 106 107 # check which compiler is being used 108 if self.compiler.compiler_type == "msvc": 109 # '-fopenmp' is called '/openmp' in msvc 110 compile_flags += ['/openmp'] 111 else: 112 compile_flags += ['-fopenmp'] 113 link_flags += ['-fopenmp'] 114 115 if self.can_compile_link(compile_flags, link_flags): 116 for ext in self.extensions: 117 ext.extra_compile_args += compile_flags 118 ext.extra_link_args += link_flags 119 120 build_ext.build_extensions(self) 121 122 123 with open('skimage/__init__.py', encoding='utf-8') as fid: 124 for line in fid: 125 if line.startswith('__version__'): 126 VERSION = line.strip().split()[-1][1:-1] 127 break 128 129 130 def parse_requirements_file(filename): 131 with open(filename, encoding='utf-8') as fid: 132 requires = [line.strip() for line in fid.readlines() if line] 133 134 return requires 135 136 137 INSTALL_REQUIRES = parse_requirements_file('requirements/default.txt') 138 # The `requirements/extras.txt` file is explicitely omitted because 139 # it contains requirements that do not have wheels uploaded to pip 140 # for the platforms we wish to support. 141 extras_require = { 142 dep: parse_requirements_file('requirements/' + dep + '.txt') 143 for dep in ['docs', 'optional', 'test', 'data'] 144 } 145 146 # requirements for those browsing PyPI 147 REQUIRES = [r.replace('>=', ' (>= ') + ')' for r in INSTALL_REQUIRES] 148 REQUIRES = [r.replace('==', ' (== ') for r in REQUIRES] 149 REQUIRES = [r.replace('[array]', '') for r in REQUIRES] 150 151 152 def configuration(parent_package='', top_path=None): 153 if os.path.exists('MANIFEST'): 154 os.remove('MANIFEST') 155 156 from numpy.distutils.misc_util import Configuration 157 config = Configuration(None, parent_package, top_path) 158 159 config.set_options( 160 ignore_setup_xxx_py=True, 161 assume_default_configuration=True, 162 delegate_options_to_subpackages=True, 163 quiet=True) 164 165 config.add_subpackage('skimage') 166 167 return config 168 169 170 if __name__ == "__main__": 171 cmdclass = {'build_py': build_py, 172 'sdist': sdist} 173 try: 174 # test if build dependencies exist. 175 # if not, some commands are still viable. 176 # note: this must be kept in sync with pyproject.toml 177 from numpy.distutils.core import setup 178 import cython 179 extra = {'configuration': configuration} 180 cmdclass['build_ext'] = ConditionalOpenMP 181 except ImportError: 182 if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or 183 sys.argv[1] in ('--help-commands', 184 '--version', 185 'clean', 186 'egg_info', 187 'install_egg_info', 188 'rotate', 189 'sdist')): 190 # For these actions, compilation is not required. 191 # 192 # They are required to succeed for example when pip is 193 # used to install scikit-image when Numpy/cython are not 194 # yet present in the system. 195 from setuptools import setup 196 extra = {} 197 else: 198 print(textwrap.dedent(""" 199 To install scikit-image from source, you will need NumPy 200 and Cython. 201 Install NumPy and Cython with your python package manager. 202 If you are using pip, the commands are: 203 204 pip install numpy cython 205 206 For more details, see: 207 208 https://scikit-image.org/docs/stable/install.html 209 """)) 210 sys.exit(1) 211 212 setup( 213 name=DISTNAME, 214 description=DESCRIPTION, 215 long_description=LONG_DESCRIPTION, 216 long_description_content_type="text/markdown", 217 maintainer=MAINTAINER, 218 maintainer_email=MAINTAINER_EMAIL, 219 url=URL, 220 license=LICENSE, 221 download_url=DOWNLOAD_URL, 222 project_urls=PROJECT_URLS, 223 version=VERSION, 224 classifiers=[ 225 'Development Status :: 4 - Beta', 226 'Environment :: Console', 227 'Intended Audience :: Developers', 228 'Intended Audience :: Science/Research', 229 'License :: OSI Approved :: BSD License', 230 'Programming Language :: C', 231 'Programming Language :: Python', 232 'Programming Language :: Python :: 3', 233 'Programming Language :: Python :: 3.7', 234 'Programming Language :: Python :: 3.8', 235 'Programming Language :: Python :: 3.9', 236 'Programming Language :: Python :: 3 :: Only', 237 'Topic :: Scientific/Engineering', 238 'Operating System :: Microsoft :: Windows', 239 'Operating System :: POSIX', 240 'Operating System :: Unix', 241 'Operating System :: MacOS', 242 ], 243 install_requires=INSTALL_REQUIRES, 244 requires=REQUIRES, 245 extras_require=extras_require, 246 python_requires='>=3.7', 247 packages=setuptools.find_packages(exclude=['doc', 'benchmarks']), 248 include_package_data=True, 249 zip_safe=False, # the package can run out of an .egg file 250 entry_points={ 251 'console_scripts': ['skivi = skimage.scripts.skivi:main'], 252 }, 253 cmdclass=cmdclass, 254 **extra 255 ) 256 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -8,8 +8,8 @@ import textwrap import setuptools -from distutils.command.build_py import build_py -from distutils.command.sdist import sdist +from setuptools.command.build_py import build_py +from setuptools.command.sdist import sdist from distutils.errors import CompileError, LinkError from numpy.distutils.command.build_ext import build_ext
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -8,8 +8,8 @@\n import textwrap\n \n import setuptools\n-from distutils.command.build_py import build_py\n-from distutils.command.sdist import sdist\n+from setuptools.command.build_py import build_py\n+from setuptools.command.sdist import sdist\n from distutils.errors import CompileError, LinkError\n from numpy.distutils.command.build_ext import build_ext\n", "issue": "installation issues with setuptools 58.5.x\n## Description\r\n\r\nI observed some failures on CI in a recent PR that appear to be related to use of setuptools >= 58.5.0. Example: https://github.com/scikit-image/scikit-image/runs/4104921276?check_suite_focus=true\r\n\r\nI was able to reproduce the issue locally with 58.5.0, but it does not occur for 58.4.0. A temporary workaround would be to pin setuptools <58.5 on CI.\r\n\r\n\r\n\r\n## Way to reproduce\r\n\r\nbuild from source with setuptools >= 58.5\r\n\r\n\n", "before_files": [{"content": "#! /usr/bin/env python\n\nimport os\nimport sys\nimport tempfile\nimport shutil\nimport builtins\nimport textwrap\n\nimport setuptools\nfrom distutils.command.build_py import build_py\nfrom distutils.command.sdist import sdist\nfrom distutils.errors import CompileError, LinkError\nfrom numpy.distutils.command.build_ext import build_ext\n\n\nDISTNAME = 'scikit-image'\nDESCRIPTION = 'Image processing in Python'\nMAINTAINER = 'Stefan van der Walt'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://scikit-image.org'\nLICENSE = 'Modified BSD'\nDOWNLOAD_URL = 'https://scikit-image.org/docs/stable/install.html'\nPROJECT_URLS = {\n \"Bug Tracker\": 'https://github.com/scikit-image/scikit-image/issues',\n \"Documentation\": 'https://scikit-image.org/docs/stable/',\n \"Source Code\": 'https://github.com/scikit-image/scikit-image'\n}\n\nwith open('README.md', encoding='utf-8') as f:\n LONG_DESCRIPTION = f.read()\n\nif sys.version_info < (3, 6):\n\n error = \"\"\"Python {py} detected.\n\nscikit-image 0.18+ supports only Python 3.7 and above.\n\nFor Python 2.7, please install the 0.14.x Long Term Support release using:\n\n $ pip install 'scikit-image<0.15'\n\"\"\".format(py='.'.join([str(v) for v in sys.version_info[:3]]))\n\n sys.stderr.write(error + \"\\n\")\n sys.exit(1)\n\n# This is a bit (!) hackish: we are setting a global variable so that the main\n# skimage __init__ can detect if it is being loaded by the setup routine, to\n# avoid attempting to load components that aren't built yet:\n# the numpy distutils extensions that are used by scikit-image to recursively\n# build the compiled extensions in sub-packages is based on the Python import\n# machinery.\nbuiltins.__SKIMAGE_SETUP__ = True\n\n\n# Support for openmp\n\nclass ConditionalOpenMP(build_ext):\n\n def can_compile_link(self, compile_flags, link_flags):\n\n if \"PYODIDE_PACKAGE_ABI\" in os.environ:\n # pyodide doesn't support OpenMP\n return False\n\n cc = self.compiler\n fname = 'test.c'\n cwd = os.getcwd()\n tmpdir = tempfile.mkdtemp()\n\n code = (\"#include <omp.h>\"\n \"int main(int argc, char** argv) { return(0); }\")\n\n if self.compiler.compiler_type == \"msvc\":\n # make sure we build a DLL on Windows\n local_link_flags = link_flags + [\"/DLL\"]\n else:\n local_link_flags = link_flags\n\n try:\n os.chdir(tmpdir)\n with open(fname, 'wt') as fobj:\n fobj.write(code)\n try:\n objects = cc.compile([fname],\n extra_postargs=compile_flags)\n except CompileError:\n return False\n try:\n # Link shared lib rather then executable to avoid\n # http://bugs.python.org/issue4431 with MSVC 10+\n cc.link_shared_lib(objects, \"testlib\",\n extra_postargs=local_link_flags)\n except (LinkError, TypeError):\n return False\n finally:\n os.chdir(cwd)\n shutil.rmtree(tmpdir)\n return True\n\n def build_extensions(self):\n \"\"\" Hook into extension building to set compiler flags \"\"\"\n\n compile_flags = list()\n link_flags = list()\n\n # check which compiler is being used\n if self.compiler.compiler_type == \"msvc\":\n # '-fopenmp' is called '/openmp' in msvc\n compile_flags += ['/openmp']\n else:\n compile_flags += ['-fopenmp']\n link_flags += ['-fopenmp']\n\n if self.can_compile_link(compile_flags, link_flags):\n for ext in self.extensions:\n ext.extra_compile_args += compile_flags\n ext.extra_link_args += link_flags\n\n build_ext.build_extensions(self)\n\n\nwith open('skimage/__init__.py', encoding='utf-8') as fid:\n for line in fid:\n if line.startswith('__version__'):\n VERSION = line.strip().split()[-1][1:-1]\n break\n\n\ndef parse_requirements_file(filename):\n with open(filename, encoding='utf-8') as fid:\n requires = [line.strip() for line in fid.readlines() if line]\n\n return requires\n\n\nINSTALL_REQUIRES = parse_requirements_file('requirements/default.txt')\n# The `requirements/extras.txt` file is explicitely omitted because\n# it contains requirements that do not have wheels uploaded to pip\n# for the platforms we wish to support.\nextras_require = {\n dep: parse_requirements_file('requirements/' + dep + '.txt')\n for dep in ['docs', 'optional', 'test', 'data']\n}\n\n# requirements for those browsing PyPI\nREQUIRES = [r.replace('>=', ' (>= ') + ')' for r in INSTALL_REQUIRES]\nREQUIRES = [r.replace('==', ' (== ') for r in REQUIRES]\nREQUIRES = [r.replace('[array]', '') for r in REQUIRES]\n\n\ndef configuration(parent_package='', top_path=None):\n if os.path.exists('MANIFEST'):\n os.remove('MANIFEST')\n\n from numpy.distutils.misc_util import Configuration\n config = Configuration(None, parent_package, top_path)\n\n config.set_options(\n ignore_setup_xxx_py=True,\n assume_default_configuration=True,\n delegate_options_to_subpackages=True,\n quiet=True)\n\n config.add_subpackage('skimage')\n\n return config\n\n\nif __name__ == \"__main__\":\n cmdclass = {'build_py': build_py,\n 'sdist': sdist}\n try:\n # test if build dependencies exist.\n # if not, some commands are still viable.\n # note: this must be kept in sync with pyproject.toml\n from numpy.distutils.core import setup\n import cython\n extra = {'configuration': configuration}\n cmdclass['build_ext'] = ConditionalOpenMP\n except ImportError:\n if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or\n sys.argv[1] in ('--help-commands',\n '--version',\n 'clean',\n 'egg_info',\n 'install_egg_info',\n 'rotate',\n 'sdist')):\n # For these actions, compilation is not required.\n #\n # They are required to succeed for example when pip is\n # used to install scikit-image when Numpy/cython are not\n # yet present in the system.\n from setuptools import setup\n extra = {}\n else:\n print(textwrap.dedent(\"\"\"\n To install scikit-image from source, you will need NumPy\n and Cython.\n Install NumPy and Cython with your python package manager.\n If you are using pip, the commands are:\n\n pip install numpy cython\n\n For more details, see:\n\n https://scikit-image.org/docs/stable/install.html\n \"\"\"))\n sys.exit(1)\n\n setup(\n name=DISTNAME,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n url=URL,\n license=LICENSE,\n download_url=DOWNLOAD_URL,\n project_urls=PROJECT_URLS,\n version=VERSION,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: C',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n ],\n install_requires=INSTALL_REQUIRES,\n requires=REQUIRES,\n extras_require=extras_require,\n python_requires='>=3.7',\n packages=setuptools.find_packages(exclude=['doc', 'benchmarks']),\n include_package_data=True,\n zip_safe=False, # the package can run out of an .egg file\n entry_points={\n 'console_scripts': ['skivi = skimage.scripts.skivi:main'],\n },\n cmdclass=cmdclass,\n **extra\n )\n", "path": "setup.py"}], "after_files": [{"content": "#! /usr/bin/env python\n\nimport os\nimport sys\nimport tempfile\nimport shutil\nimport builtins\nimport textwrap\n\nimport setuptools\nfrom setuptools.command.build_py import build_py\nfrom setuptools.command.sdist import sdist\nfrom distutils.errors import CompileError, LinkError\nfrom numpy.distutils.command.build_ext import build_ext\n\n\nDISTNAME = 'scikit-image'\nDESCRIPTION = 'Image processing in Python'\nMAINTAINER = 'Stefan van der Walt'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://scikit-image.org'\nLICENSE = 'Modified BSD'\nDOWNLOAD_URL = 'https://scikit-image.org/docs/stable/install.html'\nPROJECT_URLS = {\n \"Bug Tracker\": 'https://github.com/scikit-image/scikit-image/issues',\n \"Documentation\": 'https://scikit-image.org/docs/stable/',\n \"Source Code\": 'https://github.com/scikit-image/scikit-image'\n}\n\nwith open('README.md', encoding='utf-8') as f:\n LONG_DESCRIPTION = f.read()\n\nif sys.version_info < (3, 6):\n\n error = \"\"\"Python {py} detected.\n\nscikit-image 0.18+ supports only Python 3.7 and above.\n\nFor Python 2.7, please install the 0.14.x Long Term Support release using:\n\n $ pip install 'scikit-image<0.15'\n\"\"\".format(py='.'.join([str(v) for v in sys.version_info[:3]]))\n\n sys.stderr.write(error + \"\\n\")\n sys.exit(1)\n\n# This is a bit (!) hackish: we are setting a global variable so that the main\n# skimage __init__ can detect if it is being loaded by the setup routine, to\n# avoid attempting to load components that aren't built yet:\n# the numpy distutils extensions that are used by scikit-image to recursively\n# build the compiled extensions in sub-packages is based on the Python import\n# machinery.\nbuiltins.__SKIMAGE_SETUP__ = True\n\n\n# Support for openmp\n\nclass ConditionalOpenMP(build_ext):\n\n def can_compile_link(self, compile_flags, link_flags):\n\n if \"PYODIDE_PACKAGE_ABI\" in os.environ:\n # pyodide doesn't support OpenMP\n return False\n\n cc = self.compiler\n fname = 'test.c'\n cwd = os.getcwd()\n tmpdir = tempfile.mkdtemp()\n\n code = (\"#include <omp.h>\"\n \"int main(int argc, char** argv) { return(0); }\")\n\n if self.compiler.compiler_type == \"msvc\":\n # make sure we build a DLL on Windows\n local_link_flags = link_flags + [\"/DLL\"]\n else:\n local_link_flags = link_flags\n\n try:\n os.chdir(tmpdir)\n with open(fname, 'wt') as fobj:\n fobj.write(code)\n try:\n objects = cc.compile([fname],\n extra_postargs=compile_flags)\n except CompileError:\n return False\n try:\n # Link shared lib rather then executable to avoid\n # http://bugs.python.org/issue4431 with MSVC 10+\n cc.link_shared_lib(objects, \"testlib\",\n extra_postargs=local_link_flags)\n except (LinkError, TypeError):\n return False\n finally:\n os.chdir(cwd)\n shutil.rmtree(tmpdir)\n return True\n\n def build_extensions(self):\n \"\"\" Hook into extension building to set compiler flags \"\"\"\n\n compile_flags = list()\n link_flags = list()\n\n # check which compiler is being used\n if self.compiler.compiler_type == \"msvc\":\n # '-fopenmp' is called '/openmp' in msvc\n compile_flags += ['/openmp']\n else:\n compile_flags += ['-fopenmp']\n link_flags += ['-fopenmp']\n\n if self.can_compile_link(compile_flags, link_flags):\n for ext in self.extensions:\n ext.extra_compile_args += compile_flags\n ext.extra_link_args += link_flags\n\n build_ext.build_extensions(self)\n\n\nwith open('skimage/__init__.py', encoding='utf-8') as fid:\n for line in fid:\n if line.startswith('__version__'):\n VERSION = line.strip().split()[-1][1:-1]\n break\n\n\ndef parse_requirements_file(filename):\n with open(filename, encoding='utf-8') as fid:\n requires = [line.strip() for line in fid.readlines() if line]\n\n return requires\n\n\nINSTALL_REQUIRES = parse_requirements_file('requirements/default.txt')\n# The `requirements/extras.txt` file is explicitely omitted because\n# it contains requirements that do not have wheels uploaded to pip\n# for the platforms we wish to support.\nextras_require = {\n dep: parse_requirements_file('requirements/' + dep + '.txt')\n for dep in ['docs', 'optional', 'test', 'data']\n}\n\n# requirements for those browsing PyPI\nREQUIRES = [r.replace('>=', ' (>= ') + ')' for r in INSTALL_REQUIRES]\nREQUIRES = [r.replace('==', ' (== ') for r in REQUIRES]\nREQUIRES = [r.replace('[array]', '') for r in REQUIRES]\n\n\ndef configuration(parent_package='', top_path=None):\n if os.path.exists('MANIFEST'):\n os.remove('MANIFEST')\n\n from numpy.distutils.misc_util import Configuration\n config = Configuration(None, parent_package, top_path)\n\n config.set_options(\n ignore_setup_xxx_py=True,\n assume_default_configuration=True,\n delegate_options_to_subpackages=True,\n quiet=True)\n\n config.add_subpackage('skimage')\n\n return config\n\n\nif __name__ == \"__main__\":\n cmdclass = {'build_py': build_py,\n 'sdist': sdist}\n try:\n # test if build dependencies exist.\n # if not, some commands are still viable.\n # note: this must be kept in sync with pyproject.toml\n from numpy.distutils.core import setup\n import cython\n extra = {'configuration': configuration}\n cmdclass['build_ext'] = ConditionalOpenMP\n except ImportError:\n if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or\n sys.argv[1] in ('--help-commands',\n '--version',\n 'clean',\n 'egg_info',\n 'install_egg_info',\n 'rotate',\n 'sdist')):\n # For these actions, compilation is not required.\n #\n # They are required to succeed for example when pip is\n # used to install scikit-image when Numpy/cython are not\n # yet present in the system.\n from setuptools import setup\n extra = {}\n else:\n print(textwrap.dedent(\"\"\"\n To install scikit-image from source, you will need NumPy\n and Cython.\n Install NumPy and Cython with your python package manager.\n If you are using pip, the commands are:\n\n pip install numpy cython\n\n For more details, see:\n\n https://scikit-image.org/docs/stable/install.html\n \"\"\"))\n sys.exit(1)\n\n setup(\n name=DISTNAME,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n url=URL,\n license=LICENSE,\n download_url=DOWNLOAD_URL,\n project_urls=PROJECT_URLS,\n version=VERSION,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: C',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n ],\n install_requires=INSTALL_REQUIRES,\n requires=REQUIRES,\n extras_require=extras_require,\n python_requires='>=3.7',\n packages=setuptools.find_packages(exclude=['doc', 'benchmarks']),\n include_package_data=True,\n zip_safe=False, # the package can run out of an .egg file\n entry_points={\n 'console_scripts': ['skivi = skimage.scripts.skivi:main'],\n },\n cmdclass=cmdclass,\n **extra\n )\n", "path": "setup.py"}]}
2,962
99
gh_patches_debug_36568
rasdani/github-patches
git_diff
feast-dev__feast-2240
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Redis can delete incorrect keys I haven't tested this explicitly but from the data model + this function it looks to be as if two FeatureViews have the same Entities (but are different tables) and one view is deleted the online keys for the other view will also be deleted... This seems like a bit of a flaw either in the Redis data model or in the deletion mechanism. https://github.com/feast-dev/feast/blob/ec4165396f70ab20b42246b093f777dfcc9f5277/sdk/python/feast/infra/online_stores/redis.py#L75-L88 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sdk/python/feast/infra/online_stores/redis.py` Content: ``` 1 # Copyright 2021 The Feast Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 import json 15 import logging 16 from datetime import datetime 17 from enum import Enum 18 from typing import ( 19 Any, 20 ByteString, 21 Callable, 22 Dict, 23 List, 24 Optional, 25 Sequence, 26 Tuple, 27 Union, 28 ) 29 30 from google.protobuf.timestamp_pb2 import Timestamp 31 from pydantic import StrictStr 32 from pydantic.typing import Literal 33 34 from feast import Entity, FeatureView, RepoConfig, utils 35 from feast.infra.online_stores.helpers import _mmh3, _redis_key, _redis_key_prefix 36 from feast.infra.online_stores.online_store import OnlineStore 37 from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto 38 from feast.protos.feast.types.Value_pb2 import Value as ValueProto 39 from feast.repo_config import FeastConfigBaseModel 40 from feast.usage import log_exceptions_and_usage, tracing_span 41 42 try: 43 from redis import Redis 44 from redis.cluster import RedisCluster 45 except ImportError as e: 46 from feast.errors import FeastExtrasDependencyImportError 47 48 raise FeastExtrasDependencyImportError("redis", str(e)) 49 50 logger = logging.getLogger(__name__) 51 52 53 class RedisType(str, Enum): 54 redis = "redis" 55 redis_cluster = "redis_cluster" 56 57 58 class RedisOnlineStoreConfig(FeastConfigBaseModel): 59 """Online store config for Redis store""" 60 61 type: Literal["redis"] = "redis" 62 """Online store type selector""" 63 64 redis_type: RedisType = RedisType.redis 65 """Redis type: redis or redis_cluster""" 66 67 connection_string: StrictStr = "localhost:6379" 68 """Connection string containing the host, port, and configuration parameters for Redis 69 format: host:port,parameter1,parameter2 eg. redis:6379,db=0 """ 70 71 72 class RedisOnlineStore(OnlineStore): 73 _client: Optional[Union[Redis, RedisCluster]] = None 74 75 def delete_table_values(self, config: RepoConfig, table: FeatureView): 76 client = self._get_client(config.online_store) 77 deleted_count = 0 78 pipeline = client.pipeline() 79 prefix = _redis_key_prefix(table.entities) 80 81 for _k in client.scan_iter( 82 b"".join([prefix, b"*", config.project.encode("utf8")]) 83 ): 84 pipeline.delete(_k) 85 deleted_count += 1 86 pipeline.execute() 87 88 logger.debug(f"Deleted {deleted_count} keys for {table.name}") 89 90 @log_exceptions_and_usage(online_store="redis") 91 def update( 92 self, 93 config: RepoConfig, 94 tables_to_delete: Sequence[FeatureView], 95 tables_to_keep: Sequence[FeatureView], 96 entities_to_delete: Sequence[Entity], 97 entities_to_keep: Sequence[Entity], 98 partial: bool, 99 ): 100 """ 101 We delete the keys in redis for tables/views being removed. 102 """ 103 for table in tables_to_delete: 104 self.delete_table_values(config, table) 105 106 def teardown( 107 self, 108 config: RepoConfig, 109 tables: Sequence[FeatureView], 110 entities: Sequence[Entity], 111 ): 112 """ 113 We delete the keys in redis for tables/views being removed. 114 """ 115 for table in tables: 116 self.delete_table_values(config, table) 117 118 @staticmethod 119 def _parse_connection_string(connection_string: str): 120 """ 121 Reads Redis connections string using format 122 for RedisCluster: 123 redis1:6379,redis2:6379,decode_responses=true,skip_full_coverage_check=true,ssl=true,password=... 124 for Redis: 125 redis_master:6379,db=0,ssl=true,password=... 126 """ 127 startup_nodes = [ 128 dict(zip(["host", "port"], c.split(":"))) 129 for c in connection_string.split(",") 130 if "=" not in c 131 ] 132 params = {} 133 for c in connection_string.split(","): 134 if "=" in c: 135 kv = c.split("=", 1) 136 try: 137 kv[1] = json.loads(kv[1]) 138 except json.JSONDecodeError: 139 ... 140 141 it = iter(kv) 142 params.update(dict(zip(it, it))) 143 144 return startup_nodes, params 145 146 def _get_client(self, online_store_config: RedisOnlineStoreConfig): 147 """ 148 Creates the Redis client RedisCluster or Redis depending on configuration 149 """ 150 if not self._client: 151 startup_nodes, kwargs = self._parse_connection_string( 152 online_store_config.connection_string 153 ) 154 if online_store_config.redis_type == RedisType.redis_cluster: 155 kwargs["startup_nodes"] = startup_nodes 156 self._client = RedisCluster(**kwargs) 157 else: 158 kwargs["host"] = startup_nodes[0]["host"] 159 kwargs["port"] = startup_nodes[0]["port"] 160 self._client = Redis(**kwargs) 161 return self._client 162 163 @log_exceptions_and_usage(online_store="redis") 164 def online_write_batch( 165 self, 166 config: RepoConfig, 167 table: FeatureView, 168 data: List[ 169 Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]] 170 ], 171 progress: Optional[Callable[[int], Any]], 172 ) -> None: 173 online_store_config = config.online_store 174 assert isinstance(online_store_config, RedisOnlineStoreConfig) 175 176 client = self._get_client(online_store_config) 177 project = config.project 178 179 feature_view = table.name 180 ts_key = f"_ts:{feature_view}" 181 keys = [] 182 # redis pipelining optimization: send multiple commands to redis server without waiting for every reply 183 with client.pipeline() as pipe: 184 # check if a previous record under the key bin exists 185 # TODO: investigate if check and set is a better approach rather than pulling all entity ts and then setting 186 # it may be significantly slower but avoids potential (rare) race conditions 187 for entity_key, _, _, _ in data: 188 redis_key_bin = _redis_key(project, entity_key) 189 keys.append(redis_key_bin) 190 pipe.hmget(redis_key_bin, ts_key) 191 prev_event_timestamps = pipe.execute() 192 # flattening the list of lists. `hmget` does the lookup assuming a list of keys in the key bin 193 prev_event_timestamps = [i[0] for i in prev_event_timestamps] 194 195 for redis_key_bin, prev_event_time, (_, values, timestamp, _) in zip( 196 keys, prev_event_timestamps, data 197 ): 198 event_time_seconds = int(utils.make_tzaware(timestamp).timestamp()) 199 200 # ignore if event_timestamp is before the event features that are currently in the feature store 201 if prev_event_time: 202 prev_ts = Timestamp() 203 prev_ts.ParseFromString(prev_event_time) 204 if prev_ts.seconds and event_time_seconds <= prev_ts.seconds: 205 # TODO: somehow signal that it's not overwriting the current record? 206 if progress: 207 progress(1) 208 continue 209 210 ts = Timestamp() 211 ts.seconds = event_time_seconds 212 entity_hset = dict() 213 entity_hset[ts_key] = ts.SerializeToString() 214 215 for feature_name, val in values.items(): 216 f_key = _mmh3(f"{feature_view}:{feature_name}") 217 entity_hset[f_key] = val.SerializeToString() 218 219 pipe.hset(redis_key_bin, mapping=entity_hset) 220 # TODO: support expiring the entity / features in Redis 221 # otherwise entity features remain in redis until cleaned up in separate process 222 # client.expire redis_key_bin based a ttl setting 223 results = pipe.execute() 224 if progress: 225 progress(len(results)) 226 227 @log_exceptions_and_usage(online_store="redis") 228 def online_read( 229 self, 230 config: RepoConfig, 231 table: FeatureView, 232 entity_keys: List[EntityKeyProto], 233 requested_features: Optional[List[str]] = None, 234 ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]: 235 online_store_config = config.online_store 236 assert isinstance(online_store_config, RedisOnlineStoreConfig) 237 238 client = self._get_client(online_store_config) 239 feature_view = table.name 240 project = config.project 241 242 result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = [] 243 244 if not requested_features: 245 requested_features = [f.name for f in table.features] 246 247 hset_keys = [_mmh3(f"{feature_view}:{k}") for k in requested_features] 248 249 ts_key = f"_ts:{feature_view}" 250 hset_keys.append(ts_key) 251 requested_features.append(ts_key) 252 253 keys = [] 254 for entity_key in entity_keys: 255 redis_key_bin = _redis_key(project, entity_key) 256 keys.append(redis_key_bin) 257 with client.pipeline() as pipe: 258 for redis_key_bin in keys: 259 pipe.hmget(redis_key_bin, hset_keys) 260 with tracing_span(name="remote_call"): 261 redis_values = pipe.execute() 262 for values in redis_values: 263 features = self._get_features_for_entity( 264 values, feature_view, requested_features 265 ) 266 result.append(features) 267 return result 268 269 def _get_features_for_entity( 270 self, 271 values: List[ByteString], 272 feature_view: str, 273 requested_features: List[str], 274 ) -> Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]: 275 res_val = dict(zip(requested_features, values)) 276 277 res_ts = Timestamp() 278 ts_val = res_val.pop(f"_ts:{feature_view}") 279 if ts_val: 280 res_ts.ParseFromString(bytes(ts_val)) 281 282 res = {} 283 for feature_name, val_bin in res_val.items(): 284 val = ValueProto() 285 if val_bin: 286 val.ParseFromString(bytes(val_bin)) 287 res[feature_name] = val 288 289 if not res: 290 return None, None 291 else: 292 timestamp = datetime.fromtimestamp(res_ts.seconds) 293 return timestamp, res 294 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sdk/python/feast/infra/online_stores/redis.py b/sdk/python/feast/infra/online_stores/redis.py --- a/sdk/python/feast/infra/online_stores/redis.py +++ b/sdk/python/feast/infra/online_stores/redis.py @@ -72,11 +72,11 @@ class RedisOnlineStore(OnlineStore): _client: Optional[Union[Redis, RedisCluster]] = None - def delete_table_values(self, config: RepoConfig, table: FeatureView): + def delete_entity_values(self, config: RepoConfig, join_keys: List[str]): client = self._get_client(config.online_store) deleted_count = 0 pipeline = client.pipeline() - prefix = _redis_key_prefix(table.entities) + prefix = _redis_key_prefix(join_keys) for _k in client.scan_iter( b"".join([prefix, b"*", config.project.encode("utf8")]) @@ -85,7 +85,7 @@ deleted_count += 1 pipeline.execute() - logger.debug(f"Deleted {deleted_count} keys for {table.name}") + logger.debug(f"Deleted {deleted_count} rows for entity {', '.join(join_keys)}") @log_exceptions_and_usage(online_store="redis") def update( @@ -98,10 +98,16 @@ partial: bool, ): """ - We delete the keys in redis for tables/views being removed. + Look for join_keys (list of entities) that are not in use anymore + (usually this happens when the last feature view that was using specific compound key is deleted) + and remove all features attached to this "join_keys". """ - for table in tables_to_delete: - self.delete_table_values(config, table) + join_keys_to_keep = set(tuple(table.entities) for table in tables_to_keep) + + join_keys_to_delete = set(tuple(table.entities) for table in tables_to_delete) + + for join_keys in join_keys_to_delete - join_keys_to_keep: + self.delete_entity_values(config, list(join_keys)) def teardown( self, @@ -112,8 +118,10 @@ """ We delete the keys in redis for tables/views being removed. """ - for table in tables: - self.delete_table_values(config, table) + join_keys_to_delete = set(tuple(table.entities) for table in tables) + + for join_keys in join_keys_to_delete: + self.delete_entity_values(config, list(join_keys)) @staticmethod def _parse_connection_string(connection_string: str):
{"golden_diff": "diff --git a/sdk/python/feast/infra/online_stores/redis.py b/sdk/python/feast/infra/online_stores/redis.py\n--- a/sdk/python/feast/infra/online_stores/redis.py\n+++ b/sdk/python/feast/infra/online_stores/redis.py\n@@ -72,11 +72,11 @@\n class RedisOnlineStore(OnlineStore):\n _client: Optional[Union[Redis, RedisCluster]] = None\n \n- def delete_table_values(self, config: RepoConfig, table: FeatureView):\n+ def delete_entity_values(self, config: RepoConfig, join_keys: List[str]):\n client = self._get_client(config.online_store)\n deleted_count = 0\n pipeline = client.pipeline()\n- prefix = _redis_key_prefix(table.entities)\n+ prefix = _redis_key_prefix(join_keys)\n \n for _k in client.scan_iter(\n b\"\".join([prefix, b\"*\", config.project.encode(\"utf8\")])\n@@ -85,7 +85,7 @@\n deleted_count += 1\n pipeline.execute()\n \n- logger.debug(f\"Deleted {deleted_count} keys for {table.name}\")\n+ logger.debug(f\"Deleted {deleted_count} rows for entity {', '.join(join_keys)}\")\n \n @log_exceptions_and_usage(online_store=\"redis\")\n def update(\n@@ -98,10 +98,16 @@\n partial: bool,\n ):\n \"\"\"\n- We delete the keys in redis for tables/views being removed.\n+ Look for join_keys (list of entities) that are not in use anymore\n+ (usually this happens when the last feature view that was using specific compound key is deleted)\n+ and remove all features attached to this \"join_keys\".\n \"\"\"\n- for table in tables_to_delete:\n- self.delete_table_values(config, table)\n+ join_keys_to_keep = set(tuple(table.entities) for table in tables_to_keep)\n+\n+ join_keys_to_delete = set(tuple(table.entities) for table in tables_to_delete)\n+\n+ for join_keys in join_keys_to_delete - join_keys_to_keep:\n+ self.delete_entity_values(config, list(join_keys))\n \n def teardown(\n self,\n@@ -112,8 +118,10 @@\n \"\"\"\n We delete the keys in redis for tables/views being removed.\n \"\"\"\n- for table in tables:\n- self.delete_table_values(config, table)\n+ join_keys_to_delete = set(tuple(table.entities) for table in tables)\n+\n+ for join_keys in join_keys_to_delete:\n+ self.delete_entity_values(config, list(join_keys))\n \n @staticmethod\n def _parse_connection_string(connection_string: str):\n", "issue": "Redis can delete incorrect keys\nI haven't tested this explicitly but from the data model + this function it looks to be as if two FeatureViews have the same Entities (but are different tables) and one view is deleted the online keys for the other view will also be deleted... This seems like a bit of a flaw either in the Redis data model or in the deletion mechanism. \r\n\r\nhttps://github.com/feast-dev/feast/blob/ec4165396f70ab20b42246b093f777dfcc9f5277/sdk/python/feast/infra/online_stores/redis.py#L75-L88\n", "before_files": [{"content": "# Copyright 2021 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport json\nimport logging\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import (\n Any,\n ByteString,\n Callable,\n Dict,\n List,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\n\nfrom google.protobuf.timestamp_pb2 import Timestamp\nfrom pydantic import StrictStr\nfrom pydantic.typing import Literal\n\nfrom feast import Entity, FeatureView, RepoConfig, utils\nfrom feast.infra.online_stores.helpers import _mmh3, _redis_key, _redis_key_prefix\nfrom feast.infra.online_stores.online_store import OnlineStore\nfrom feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto\nfrom feast.protos.feast.types.Value_pb2 import Value as ValueProto\nfrom feast.repo_config import FeastConfigBaseModel\nfrom feast.usage import log_exceptions_and_usage, tracing_span\n\ntry:\n from redis import Redis\n from redis.cluster import RedisCluster\nexcept ImportError as e:\n from feast.errors import FeastExtrasDependencyImportError\n\n raise FeastExtrasDependencyImportError(\"redis\", str(e))\n\nlogger = logging.getLogger(__name__)\n\n\nclass RedisType(str, Enum):\n redis = \"redis\"\n redis_cluster = \"redis_cluster\"\n\n\nclass RedisOnlineStoreConfig(FeastConfigBaseModel):\n \"\"\"Online store config for Redis store\"\"\"\n\n type: Literal[\"redis\"] = \"redis\"\n \"\"\"Online store type selector\"\"\"\n\n redis_type: RedisType = RedisType.redis\n \"\"\"Redis type: redis or redis_cluster\"\"\"\n\n connection_string: StrictStr = \"localhost:6379\"\n \"\"\"Connection string containing the host, port, and configuration parameters for Redis\n format: host:port,parameter1,parameter2 eg. redis:6379,db=0 \"\"\"\n\n\nclass RedisOnlineStore(OnlineStore):\n _client: Optional[Union[Redis, RedisCluster]] = None\n\n def delete_table_values(self, config: RepoConfig, table: FeatureView):\n client = self._get_client(config.online_store)\n deleted_count = 0\n pipeline = client.pipeline()\n prefix = _redis_key_prefix(table.entities)\n\n for _k in client.scan_iter(\n b\"\".join([prefix, b\"*\", config.project.encode(\"utf8\")])\n ):\n pipeline.delete(_k)\n deleted_count += 1\n pipeline.execute()\n\n logger.debug(f\"Deleted {deleted_count} keys for {table.name}\")\n\n @log_exceptions_and_usage(online_store=\"redis\")\n def update(\n self,\n config: RepoConfig,\n tables_to_delete: Sequence[FeatureView],\n tables_to_keep: Sequence[FeatureView],\n entities_to_delete: Sequence[Entity],\n entities_to_keep: Sequence[Entity],\n partial: bool,\n ):\n \"\"\"\n We delete the keys in redis for tables/views being removed.\n \"\"\"\n for table in tables_to_delete:\n self.delete_table_values(config, table)\n\n def teardown(\n self,\n config: RepoConfig,\n tables: Sequence[FeatureView],\n entities: Sequence[Entity],\n ):\n \"\"\"\n We delete the keys in redis for tables/views being removed.\n \"\"\"\n for table in tables:\n self.delete_table_values(config, table)\n\n @staticmethod\n def _parse_connection_string(connection_string: str):\n \"\"\"\n Reads Redis connections string using format\n for RedisCluster:\n redis1:6379,redis2:6379,decode_responses=true,skip_full_coverage_check=true,ssl=true,password=...\n for Redis:\n redis_master:6379,db=0,ssl=true,password=...\n \"\"\"\n startup_nodes = [\n dict(zip([\"host\", \"port\"], c.split(\":\")))\n for c in connection_string.split(\",\")\n if \"=\" not in c\n ]\n params = {}\n for c in connection_string.split(\",\"):\n if \"=\" in c:\n kv = c.split(\"=\", 1)\n try:\n kv[1] = json.loads(kv[1])\n except json.JSONDecodeError:\n ...\n\n it = iter(kv)\n params.update(dict(zip(it, it)))\n\n return startup_nodes, params\n\n def _get_client(self, online_store_config: RedisOnlineStoreConfig):\n \"\"\"\n Creates the Redis client RedisCluster or Redis depending on configuration\n \"\"\"\n if not self._client:\n startup_nodes, kwargs = self._parse_connection_string(\n online_store_config.connection_string\n )\n if online_store_config.redis_type == RedisType.redis_cluster:\n kwargs[\"startup_nodes\"] = startup_nodes\n self._client = RedisCluster(**kwargs)\n else:\n kwargs[\"host\"] = startup_nodes[0][\"host\"]\n kwargs[\"port\"] = startup_nodes[0][\"port\"]\n self._client = Redis(**kwargs)\n return self._client\n\n @log_exceptions_and_usage(online_store=\"redis\")\n def online_write_batch(\n self,\n config: RepoConfig,\n table: FeatureView,\n data: List[\n Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]\n ],\n progress: Optional[Callable[[int], Any]],\n ) -> None:\n online_store_config = config.online_store\n assert isinstance(online_store_config, RedisOnlineStoreConfig)\n\n client = self._get_client(online_store_config)\n project = config.project\n\n feature_view = table.name\n ts_key = f\"_ts:{feature_view}\"\n keys = []\n # redis pipelining optimization: send multiple commands to redis server without waiting for every reply\n with client.pipeline() as pipe:\n # check if a previous record under the key bin exists\n # TODO: investigate if check and set is a better approach rather than pulling all entity ts and then setting\n # it may be significantly slower but avoids potential (rare) race conditions\n for entity_key, _, _, _ in data:\n redis_key_bin = _redis_key(project, entity_key)\n keys.append(redis_key_bin)\n pipe.hmget(redis_key_bin, ts_key)\n prev_event_timestamps = pipe.execute()\n # flattening the list of lists. `hmget` does the lookup assuming a list of keys in the key bin\n prev_event_timestamps = [i[0] for i in prev_event_timestamps]\n\n for redis_key_bin, prev_event_time, (_, values, timestamp, _) in zip(\n keys, prev_event_timestamps, data\n ):\n event_time_seconds = int(utils.make_tzaware(timestamp).timestamp())\n\n # ignore if event_timestamp is before the event features that are currently in the feature store\n if prev_event_time:\n prev_ts = Timestamp()\n prev_ts.ParseFromString(prev_event_time)\n if prev_ts.seconds and event_time_seconds <= prev_ts.seconds:\n # TODO: somehow signal that it's not overwriting the current record?\n if progress:\n progress(1)\n continue\n\n ts = Timestamp()\n ts.seconds = event_time_seconds\n entity_hset = dict()\n entity_hset[ts_key] = ts.SerializeToString()\n\n for feature_name, val in values.items():\n f_key = _mmh3(f\"{feature_view}:{feature_name}\")\n entity_hset[f_key] = val.SerializeToString()\n\n pipe.hset(redis_key_bin, mapping=entity_hset)\n # TODO: support expiring the entity / features in Redis\n # otherwise entity features remain in redis until cleaned up in separate process\n # client.expire redis_key_bin based a ttl setting\n results = pipe.execute()\n if progress:\n progress(len(results))\n\n @log_exceptions_and_usage(online_store=\"redis\")\n def online_read(\n self,\n config: RepoConfig,\n table: FeatureView,\n entity_keys: List[EntityKeyProto],\n requested_features: Optional[List[str]] = None,\n ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:\n online_store_config = config.online_store\n assert isinstance(online_store_config, RedisOnlineStoreConfig)\n\n client = self._get_client(online_store_config)\n feature_view = table.name\n project = config.project\n\n result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = []\n\n if not requested_features:\n requested_features = [f.name for f in table.features]\n\n hset_keys = [_mmh3(f\"{feature_view}:{k}\") for k in requested_features]\n\n ts_key = f\"_ts:{feature_view}\"\n hset_keys.append(ts_key)\n requested_features.append(ts_key)\n\n keys = []\n for entity_key in entity_keys:\n redis_key_bin = _redis_key(project, entity_key)\n keys.append(redis_key_bin)\n with client.pipeline() as pipe:\n for redis_key_bin in keys:\n pipe.hmget(redis_key_bin, hset_keys)\n with tracing_span(name=\"remote_call\"):\n redis_values = pipe.execute()\n for values in redis_values:\n features = self._get_features_for_entity(\n values, feature_view, requested_features\n )\n result.append(features)\n return result\n\n def _get_features_for_entity(\n self,\n values: List[ByteString],\n feature_view: str,\n requested_features: List[str],\n ) -> Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]:\n res_val = dict(zip(requested_features, values))\n\n res_ts = Timestamp()\n ts_val = res_val.pop(f\"_ts:{feature_view}\")\n if ts_val:\n res_ts.ParseFromString(bytes(ts_val))\n\n res = {}\n for feature_name, val_bin in res_val.items():\n val = ValueProto()\n if val_bin:\n val.ParseFromString(bytes(val_bin))\n res[feature_name] = val\n\n if not res:\n return None, None\n else:\n timestamp = datetime.fromtimestamp(res_ts.seconds)\n return timestamp, res\n", "path": "sdk/python/feast/infra/online_stores/redis.py"}], "after_files": [{"content": "# Copyright 2021 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport json\nimport logging\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import (\n Any,\n ByteString,\n Callable,\n Dict,\n List,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\n\nfrom google.protobuf.timestamp_pb2 import Timestamp\nfrom pydantic import StrictStr\nfrom pydantic.typing import Literal\n\nfrom feast import Entity, FeatureView, RepoConfig, utils\nfrom feast.infra.online_stores.helpers import _mmh3, _redis_key, _redis_key_prefix\nfrom feast.infra.online_stores.online_store import OnlineStore\nfrom feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto\nfrom feast.protos.feast.types.Value_pb2 import Value as ValueProto\nfrom feast.repo_config import FeastConfigBaseModel\nfrom feast.usage import log_exceptions_and_usage, tracing_span\n\ntry:\n from redis import Redis\n from redis.cluster import RedisCluster\nexcept ImportError as e:\n from feast.errors import FeastExtrasDependencyImportError\n\n raise FeastExtrasDependencyImportError(\"redis\", str(e))\n\nlogger = logging.getLogger(__name__)\n\n\nclass RedisType(str, Enum):\n redis = \"redis\"\n redis_cluster = \"redis_cluster\"\n\n\nclass RedisOnlineStoreConfig(FeastConfigBaseModel):\n \"\"\"Online store config for Redis store\"\"\"\n\n type: Literal[\"redis\"] = \"redis\"\n \"\"\"Online store type selector\"\"\"\n\n redis_type: RedisType = RedisType.redis\n \"\"\"Redis type: redis or redis_cluster\"\"\"\n\n connection_string: StrictStr = \"localhost:6379\"\n \"\"\"Connection string containing the host, port, and configuration parameters for Redis\n format: host:port,parameter1,parameter2 eg. redis:6379,db=0 \"\"\"\n\n\nclass RedisOnlineStore(OnlineStore):\n _client: Optional[Union[Redis, RedisCluster]] = None\n\n def delete_entity_values(self, config: RepoConfig, join_keys: List[str]):\n client = self._get_client(config.online_store)\n deleted_count = 0\n pipeline = client.pipeline()\n prefix = _redis_key_prefix(join_keys)\n\n for _k in client.scan_iter(\n b\"\".join([prefix, b\"*\", config.project.encode(\"utf8\")])\n ):\n pipeline.delete(_k)\n deleted_count += 1\n pipeline.execute()\n\n logger.debug(f\"Deleted {deleted_count} rows for entity {', '.join(join_keys)}\")\n\n @log_exceptions_and_usage(online_store=\"redis\")\n def update(\n self,\n config: RepoConfig,\n tables_to_delete: Sequence[FeatureView],\n tables_to_keep: Sequence[FeatureView],\n entities_to_delete: Sequence[Entity],\n entities_to_keep: Sequence[Entity],\n partial: bool,\n ):\n \"\"\"\n Look for join_keys (list of entities) that are not in use anymore\n (usually this happens when the last feature view that was using specific compound key is deleted)\n and remove all features attached to this \"join_keys\".\n \"\"\"\n join_keys_to_keep = set(tuple(table.entities) for table in tables_to_keep)\n\n join_keys_to_delete = set(tuple(table.entities) for table in tables_to_delete)\n\n for join_keys in join_keys_to_delete - join_keys_to_keep:\n self.delete_entity_values(config, list(join_keys))\n\n def teardown(\n self,\n config: RepoConfig,\n tables: Sequence[FeatureView],\n entities: Sequence[Entity],\n ):\n \"\"\"\n We delete the keys in redis for tables/views being removed.\n \"\"\"\n join_keys_to_delete = set(tuple(table.entities) for table in tables)\n\n for join_keys in join_keys_to_delete:\n self.delete_entity_values(config, list(join_keys))\n\n @staticmethod\n def _parse_connection_string(connection_string: str):\n \"\"\"\n Reads Redis connections string using format\n for RedisCluster:\n redis1:6379,redis2:6379,decode_responses=true,skip_full_coverage_check=true,ssl=true,password=...\n for Redis:\n redis_master:6379,db=0,ssl=true,password=...\n \"\"\"\n startup_nodes = [\n dict(zip([\"host\", \"port\"], c.split(\":\")))\n for c in connection_string.split(\",\")\n if \"=\" not in c\n ]\n params = {}\n for c in connection_string.split(\",\"):\n if \"=\" in c:\n kv = c.split(\"=\", 1)\n try:\n kv[1] = json.loads(kv[1])\n except json.JSONDecodeError:\n ...\n\n it = iter(kv)\n params.update(dict(zip(it, it)))\n\n return startup_nodes, params\n\n def _get_client(self, online_store_config: RedisOnlineStoreConfig):\n \"\"\"\n Creates the Redis client RedisCluster or Redis depending on configuration\n \"\"\"\n if not self._client:\n startup_nodes, kwargs = self._parse_connection_string(\n online_store_config.connection_string\n )\n if online_store_config.redis_type == RedisType.redis_cluster:\n kwargs[\"startup_nodes\"] = startup_nodes\n self._client = RedisCluster(**kwargs)\n else:\n kwargs[\"host\"] = startup_nodes[0][\"host\"]\n kwargs[\"port\"] = startup_nodes[0][\"port\"]\n self._client = Redis(**kwargs)\n return self._client\n\n @log_exceptions_and_usage(online_store=\"redis\")\n def online_write_batch(\n self,\n config: RepoConfig,\n table: FeatureView,\n data: List[\n Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]\n ],\n progress: Optional[Callable[[int], Any]],\n ) -> None:\n online_store_config = config.online_store\n assert isinstance(online_store_config, RedisOnlineStoreConfig)\n\n client = self._get_client(online_store_config)\n project = config.project\n\n feature_view = table.name\n ts_key = f\"_ts:{feature_view}\"\n keys = []\n # redis pipelining optimization: send multiple commands to redis server without waiting for every reply\n with client.pipeline() as pipe:\n # check if a previous record under the key bin exists\n # TODO: investigate if check and set is a better approach rather than pulling all entity ts and then setting\n # it may be significantly slower but avoids potential (rare) race conditions\n for entity_key, _, _, _ in data:\n redis_key_bin = _redis_key(project, entity_key)\n keys.append(redis_key_bin)\n pipe.hmget(redis_key_bin, ts_key)\n prev_event_timestamps = pipe.execute()\n # flattening the list of lists. `hmget` does the lookup assuming a list of keys in the key bin\n prev_event_timestamps = [i[0] for i in prev_event_timestamps]\n\n for redis_key_bin, prev_event_time, (_, values, timestamp, _) in zip(\n keys, prev_event_timestamps, data\n ):\n event_time_seconds = int(utils.make_tzaware(timestamp).timestamp())\n\n # ignore if event_timestamp is before the event features that are currently in the feature store\n if prev_event_time:\n prev_ts = Timestamp()\n prev_ts.ParseFromString(prev_event_time)\n if prev_ts.seconds and event_time_seconds <= prev_ts.seconds:\n # TODO: somehow signal that it's not overwriting the current record?\n if progress:\n progress(1)\n continue\n\n ts = Timestamp()\n ts.seconds = event_time_seconds\n entity_hset = dict()\n entity_hset[ts_key] = ts.SerializeToString()\n\n for feature_name, val in values.items():\n f_key = _mmh3(f\"{feature_view}:{feature_name}\")\n entity_hset[f_key] = val.SerializeToString()\n\n pipe.hset(redis_key_bin, mapping=entity_hset)\n # TODO: support expiring the entity / features in Redis\n # otherwise entity features remain in redis until cleaned up in separate process\n # client.expire redis_key_bin based a ttl setting\n results = pipe.execute()\n if progress:\n progress(len(results))\n\n @log_exceptions_and_usage(online_store=\"redis\")\n def online_read(\n self,\n config: RepoConfig,\n table: FeatureView,\n entity_keys: List[EntityKeyProto],\n requested_features: Optional[List[str]] = None,\n ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:\n online_store_config = config.online_store\n assert isinstance(online_store_config, RedisOnlineStoreConfig)\n\n client = self._get_client(online_store_config)\n feature_view = table.name\n project = config.project\n\n result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = []\n\n if not requested_features:\n requested_features = [f.name for f in table.features]\n\n hset_keys = [_mmh3(f\"{feature_view}:{k}\") for k in requested_features]\n\n ts_key = f\"_ts:{feature_view}\"\n hset_keys.append(ts_key)\n requested_features.append(ts_key)\n\n keys = []\n for entity_key in entity_keys:\n redis_key_bin = _redis_key(project, entity_key)\n keys.append(redis_key_bin)\n with client.pipeline() as pipe:\n for redis_key_bin in keys:\n pipe.hmget(redis_key_bin, hset_keys)\n with tracing_span(name=\"remote_call\"):\n redis_values = pipe.execute()\n for values in redis_values:\n features = self._get_features_for_entity(\n values, feature_view, requested_features\n )\n result.append(features)\n return result\n\n def _get_features_for_entity(\n self,\n values: List[ByteString],\n feature_view: str,\n requested_features: List[str],\n ) -> Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]:\n res_val = dict(zip(requested_features, values))\n\n res_ts = Timestamp()\n ts_val = res_val.pop(f\"_ts:{feature_view}\")\n if ts_val:\n res_ts.ParseFromString(bytes(ts_val))\n\n res = {}\n for feature_name, val_bin in res_val.items():\n val = ValueProto()\n if val_bin:\n val.ParseFromString(bytes(val_bin))\n res[feature_name] = val\n\n if not res:\n return None, None\n else:\n timestamp = datetime.fromtimestamp(res_ts.seconds)\n return timestamp, res\n", "path": "sdk/python/feast/infra/online_stores/redis.py"}]}
3,443
588
gh_patches_debug_31832
rasdani/github-patches
git_diff
pypa__virtualenv-1521
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 20b1: problems with editable installs I've decided to try running `tox` on [all the Python projects I maintain](https://projects.gedmin.as/) with virtualenv 20.0.0b1, to check for regressions in the new beta. Three of the projects failed, all on Python 2.7: - gtimelog (python: No module named gtimelog) - irclog2html (ImportError: No module named zope.testrunner) - zodbbrowser (ImportError: No module named zope.testrunner) Here's how to reproduce the gtimelog issue: - git clone https://github.com/gtimelog/gtimelog - virtualenv -p python2.7 /tmp/venv - /tmp/venv/bin/pip install -e gtimelog --no-deps - /tmp/venv/bin/python -c 'import gtimelog' I expect: no output, the import should succeed. I get ``` Traceback (most recent call last): File "<string>", line 1, in <module> ImportError: No module named gtimelog ``` The `--no-deps` to pip install is there to make the test simpler and faster; I get the same failure if I do a full `pip install -e gtimelog`. I'll create a separate bug for the irclog2html/zodbbrowser issue (they're the same issue): #1518. I'm on Ubuntu 19.10. I installed virtualenv 20.0.0b1 with `pipx install --pip-args=--pre` using my system Python 3.7. `pipx runpip virtualenv list` produces ``` Package Version ------------------ -------- appdirs 1.4.3 filelock 3.0.12 importlib-metadata 1.5.0 pip 20.0.2 pkg-resources 0.0.0 setuptools 45.1.0 six 1.14.0 virtualenv 20.0.0b1 wheel 0.34.1 zipp 2.1.0 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/virtualenv/create/via_global_ref/builtin/python2/site.py` Content: ``` 1 """ 2 A simple shim module to fix up things on Python 2 only. 3 4 Note: until we setup correctly the paths we can only import built-ins. 5 """ 6 import sys 7 8 9 def main(): 10 """Patch what needed, and invoke the original site.py""" 11 config = read_pyvenv() 12 sys.real_prefix = sys.base_prefix = config["base-prefix"] 13 sys.base_exec_prefix = config["base-exec-prefix"] 14 sys.base_executable = config["base-executable"] 15 global_site_package_enabled = config.get("include-system-site-packages", False) == "true" 16 rewrite_standard_library_sys_path() 17 disable_user_site_package() 18 load_host_site() 19 if global_site_package_enabled: 20 add_global_site_package() 21 22 23 def load_host_site(): 24 """trigger reload of site.py - now it will use the standard library instance that will take care of init""" 25 # the standard library will be the first element starting with the real prefix, not zip, must be present 26 custom_site_package_path = __file__ 27 reload(sys.modules["site"]) # noqa 28 29 # ensure that our expected site packages is on the sys.path 30 import os 31 32 site_packages = r""" 33 ___EXPECTED_SITE_PACKAGES___ 34 """ 35 import json 36 37 for path in json.loads(site_packages): 38 full_path = os.path.abspath(os.path.join(custom_site_package_path, path.encode("utf-8"))) 39 if full_path not in sys.path: 40 sys.path.append(full_path) 41 42 43 def read_pyvenv(): 44 """read pyvenv.cfg""" 45 os_sep = "\\" if sys.platform == "win32" else "/" # no os module here yet - poor mans version 46 config_file = "{}{}pyvenv.cfg".format(sys.prefix, os_sep) 47 with open(config_file) as file_handler: 48 lines = file_handler.readlines() 49 config = {} 50 for line in lines: 51 try: 52 split_at = line.index("=") 53 except ValueError: 54 continue # ignore bad/empty lines 55 else: 56 config[line[:split_at].strip()] = line[split_at + 1 :].strip() 57 return config 58 59 60 def rewrite_standard_library_sys_path(): 61 """Once this site file is loaded the standard library paths have already been set, fix them up""" 62 sep = "\\" if sys.platform == "win32" else "/" 63 exe_dir = sys.executable[: sys.executable.rfind(sep)] 64 for at, value in enumerate(sys.path): 65 # replace old sys prefix path starts with new 66 if value == exe_dir: 67 pass # don't fix the current executable location, notably on Windows this gets added 68 elif value.startswith(exe_dir): 69 # content inside the exe folder needs to remap to original executables folder 70 orig_exe_folder = sys.base_executable[: sys.base_executable.rfind(sep)] 71 value = "{}{}".format(orig_exe_folder, value[len(exe_dir) :]) 72 elif value.startswith(sys.prefix): 73 value = "{}{}".format(sys.base_prefix, value[len(sys.prefix) :]) 74 elif value.startswith(sys.exec_prefix): 75 value = "{}{}".format(sys.base_exec_prefix, value[len(sys.exec_prefix) :]) 76 sys.path[at] = value 77 78 79 def disable_user_site_package(): 80 """Flip the switch on enable user site package""" 81 # sys.flags is a c-extension type, so we cannot monkey patch it, replace it with a python class to flip it 82 sys.original_flags = sys.flags 83 84 class Flags(object): 85 def __init__(self): 86 self.__dict__ = {key: getattr(sys.flags, key) for key in dir(sys.flags) if not key.startswith("_")} 87 88 sys.flags = Flags() 89 sys.flags.no_user_site = 1 90 91 92 def add_global_site_package(): 93 """add the global site package""" 94 import site 95 96 # add user site package 97 sys.flags = sys.original_flags # restore original 98 site.ENABLE_USER_SITE = None # reset user site check 99 # add the global site package to the path - use new prefix and delegate to site.py 100 orig_prefixes = None 101 try: 102 orig_prefixes = site.PREFIXES 103 site.PREFIXES = [sys.base_prefix, sys.base_exec_prefix] 104 site.main() 105 finally: 106 site.PREFIXES = orig_prefixes 107 108 109 main() 110 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/virtualenv/create/via_global_ref/builtin/python2/site.py b/src/virtualenv/create/via_global_ref/builtin/python2/site.py --- a/src/virtualenv/create/via_global_ref/builtin/python2/site.py +++ b/src/virtualenv/create/via_global_ref/builtin/python2/site.py @@ -22,11 +22,24 @@ def load_host_site(): """trigger reload of site.py - now it will use the standard library instance that will take care of init""" - # the standard library will be the first element starting with the real prefix, not zip, must be present - custom_site_package_path = __file__ + # we have a duality here, we generate the platform and pure library path based on what distutils.install specifies + # because this is what pip will be using; the host site.py though may contain it's own pattern for where the + # platform and pure library paths should exist + + # notably on Ubuntu there's a patch for getsitepackages to point to + # - prefix + local/lib/pythonx.y/dist-packages + # - prefix + lib/pythonx.y/dist-packages + # while distutils.install.cmd still points both of these to + # - prefix + lib/python2.7/site-packages + + # to facilitate when the two match, or not we first reload the site.py, now triggering the import of host site.py, + # as this will ensure that initialization code within host site.py runs + + here = __file__ # the distutils.install patterns will be injected relative to this site.py, save it here reload(sys.modules["site"]) # noqa - # ensure that our expected site packages is on the sys.path + # and then if the distutils site packages are not on the sys.path we add them via add_site_dir; note we must add + # them by invoking add_site_dir to trigger the processing of pth files import os site_packages = r""" @@ -34,10 +47,11 @@ """ import json + add_site_dir = sys.modules["site"].addsitedir for path in json.loads(site_packages): - full_path = os.path.abspath(os.path.join(custom_site_package_path, path.encode("utf-8"))) + full_path = os.path.abspath(os.path.join(here, path.encode("utf-8"))) if full_path not in sys.path: - sys.path.append(full_path) + add_site_dir(full_path) def read_pyvenv():
{"golden_diff": "diff --git a/src/virtualenv/create/via_global_ref/builtin/python2/site.py b/src/virtualenv/create/via_global_ref/builtin/python2/site.py\n--- a/src/virtualenv/create/via_global_ref/builtin/python2/site.py\n+++ b/src/virtualenv/create/via_global_ref/builtin/python2/site.py\n@@ -22,11 +22,24 @@\n \n def load_host_site():\n \"\"\"trigger reload of site.py - now it will use the standard library instance that will take care of init\"\"\"\n- # the standard library will be the first element starting with the real prefix, not zip, must be present\n- custom_site_package_path = __file__\n+ # we have a duality here, we generate the platform and pure library path based on what distutils.install specifies\n+ # because this is what pip will be using; the host site.py though may contain it's own pattern for where the\n+ # platform and pure library paths should exist\n+\n+ # notably on Ubuntu there's a patch for getsitepackages to point to\n+ # - prefix + local/lib/pythonx.y/dist-packages\n+ # - prefix + lib/pythonx.y/dist-packages\n+ # while distutils.install.cmd still points both of these to\n+ # - prefix + lib/python2.7/site-packages\n+\n+ # to facilitate when the two match, or not we first reload the site.py, now triggering the import of host site.py,\n+ # as this will ensure that initialization code within host site.py runs\n+\n+ here = __file__ # the distutils.install patterns will be injected relative to this site.py, save it here\n reload(sys.modules[\"site\"]) # noqa\n \n- # ensure that our expected site packages is on the sys.path\n+ # and then if the distutils site packages are not on the sys.path we add them via add_site_dir; note we must add\n+ # them by invoking add_site_dir to trigger the processing of pth files\n import os\n \n site_packages = r\"\"\"\n@@ -34,10 +47,11 @@\n \"\"\"\n import json\n \n+ add_site_dir = sys.modules[\"site\"].addsitedir\n for path in json.loads(site_packages):\n- full_path = os.path.abspath(os.path.join(custom_site_package_path, path.encode(\"utf-8\")))\n+ full_path = os.path.abspath(os.path.join(here, path.encode(\"utf-8\")))\n if full_path not in sys.path:\n- sys.path.append(full_path)\n+ add_site_dir(full_path)\n \n \n def read_pyvenv():\n", "issue": "20b1: problems with editable installs\nI've decided to try running `tox` on [all the Python projects I maintain](https://projects.gedmin.as/) with virtualenv 20.0.0b1, to check for regressions in the new beta. Three of the projects failed, all on Python 2.7:\r\n\r\n- gtimelog (python: No module named gtimelog)\r\n- irclog2html (ImportError: No module named zope.testrunner)\r\n- zodbbrowser (ImportError: No module named zope.testrunner)\r\n\r\nHere's how to reproduce the gtimelog issue:\r\n\r\n- git clone https://github.com/gtimelog/gtimelog\r\n- virtualenv -p python2.7 /tmp/venv\r\n- /tmp/venv/bin/pip install -e gtimelog --no-deps\r\n- /tmp/venv/bin/python -c 'import gtimelog'\r\n\r\nI expect: no output, the import should succeed.\r\n\r\nI get\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\nImportError: No module named gtimelog\r\n```\r\n\r\nThe `--no-deps` to pip install is there to make the test simpler and faster; I get the same failure if I do a full `pip install -e gtimelog`.\r\n\r\nI'll create a separate bug for the irclog2html/zodbbrowser issue (they're the same issue): #1518.\r\n\r\n\r\nI'm on Ubuntu 19.10. I installed virtualenv 20.0.0b1 with `pipx install --pip-args=--pre` using my system Python 3.7. `pipx runpip virtualenv list` produces\r\n\r\n```\r\nPackage Version \r\n------------------ --------\r\nappdirs 1.4.3 \r\nfilelock 3.0.12 \r\nimportlib-metadata 1.5.0 \r\npip 20.0.2 \r\npkg-resources 0.0.0 \r\nsetuptools 45.1.0 \r\nsix 1.14.0 \r\nvirtualenv 20.0.0b1\r\nwheel 0.34.1 \r\nzipp 2.1.0 \r\n```\n", "before_files": [{"content": "\"\"\"\nA simple shim module to fix up things on Python 2 only.\n\nNote: until we setup correctly the paths we can only import built-ins.\n\"\"\"\nimport sys\n\n\ndef main():\n \"\"\"Patch what needed, and invoke the original site.py\"\"\"\n config = read_pyvenv()\n sys.real_prefix = sys.base_prefix = config[\"base-prefix\"]\n sys.base_exec_prefix = config[\"base-exec-prefix\"]\n sys.base_executable = config[\"base-executable\"]\n global_site_package_enabled = config.get(\"include-system-site-packages\", False) == \"true\"\n rewrite_standard_library_sys_path()\n disable_user_site_package()\n load_host_site()\n if global_site_package_enabled:\n add_global_site_package()\n\n\ndef load_host_site():\n \"\"\"trigger reload of site.py - now it will use the standard library instance that will take care of init\"\"\"\n # the standard library will be the first element starting with the real prefix, not zip, must be present\n custom_site_package_path = __file__\n reload(sys.modules[\"site\"]) # noqa\n\n # ensure that our expected site packages is on the sys.path\n import os\n\n site_packages = r\"\"\"\n ___EXPECTED_SITE_PACKAGES___\n \"\"\"\n import json\n\n for path in json.loads(site_packages):\n full_path = os.path.abspath(os.path.join(custom_site_package_path, path.encode(\"utf-8\")))\n if full_path not in sys.path:\n sys.path.append(full_path)\n\n\ndef read_pyvenv():\n \"\"\"read pyvenv.cfg\"\"\"\n os_sep = \"\\\\\" if sys.platform == \"win32\" else \"/\" # no os module here yet - poor mans version\n config_file = \"{}{}pyvenv.cfg\".format(sys.prefix, os_sep)\n with open(config_file) as file_handler:\n lines = file_handler.readlines()\n config = {}\n for line in lines:\n try:\n split_at = line.index(\"=\")\n except ValueError:\n continue # ignore bad/empty lines\n else:\n config[line[:split_at].strip()] = line[split_at + 1 :].strip()\n return config\n\n\ndef rewrite_standard_library_sys_path():\n \"\"\"Once this site file is loaded the standard library paths have already been set, fix them up\"\"\"\n sep = \"\\\\\" if sys.platform == \"win32\" else \"/\"\n exe_dir = sys.executable[: sys.executable.rfind(sep)]\n for at, value in enumerate(sys.path):\n # replace old sys prefix path starts with new\n if value == exe_dir:\n pass # don't fix the current executable location, notably on Windows this gets added\n elif value.startswith(exe_dir):\n # content inside the exe folder needs to remap to original executables folder\n orig_exe_folder = sys.base_executable[: sys.base_executable.rfind(sep)]\n value = \"{}{}\".format(orig_exe_folder, value[len(exe_dir) :])\n elif value.startswith(sys.prefix):\n value = \"{}{}\".format(sys.base_prefix, value[len(sys.prefix) :])\n elif value.startswith(sys.exec_prefix):\n value = \"{}{}\".format(sys.base_exec_prefix, value[len(sys.exec_prefix) :])\n sys.path[at] = value\n\n\ndef disable_user_site_package():\n \"\"\"Flip the switch on enable user site package\"\"\"\n # sys.flags is a c-extension type, so we cannot monkey patch it, replace it with a python class to flip it\n sys.original_flags = sys.flags\n\n class Flags(object):\n def __init__(self):\n self.__dict__ = {key: getattr(sys.flags, key) for key in dir(sys.flags) if not key.startswith(\"_\")}\n\n sys.flags = Flags()\n sys.flags.no_user_site = 1\n\n\ndef add_global_site_package():\n \"\"\"add the global site package\"\"\"\n import site\n\n # add user site package\n sys.flags = sys.original_flags # restore original\n site.ENABLE_USER_SITE = None # reset user site check\n # add the global site package to the path - use new prefix and delegate to site.py\n orig_prefixes = None\n try:\n orig_prefixes = site.PREFIXES\n site.PREFIXES = [sys.base_prefix, sys.base_exec_prefix]\n site.main()\n finally:\n site.PREFIXES = orig_prefixes\n\n\nmain()\n", "path": "src/virtualenv/create/via_global_ref/builtin/python2/site.py"}], "after_files": [{"content": "\"\"\"\nA simple shim module to fix up things on Python 2 only.\n\nNote: until we setup correctly the paths we can only import built-ins.\n\"\"\"\nimport sys\n\n\ndef main():\n \"\"\"Patch what needed, and invoke the original site.py\"\"\"\n config = read_pyvenv()\n sys.real_prefix = sys.base_prefix = config[\"base-prefix\"]\n sys.base_exec_prefix = config[\"base-exec-prefix\"]\n sys.base_executable = config[\"base-executable\"]\n global_site_package_enabled = config.get(\"include-system-site-packages\", False) == \"true\"\n rewrite_standard_library_sys_path()\n disable_user_site_package()\n load_host_site()\n if global_site_package_enabled:\n add_global_site_package()\n\n\ndef load_host_site():\n \"\"\"trigger reload of site.py - now it will use the standard library instance that will take care of init\"\"\"\n # we have a duality here, we generate the platform and pure library path based on what distutils.install specifies\n # because this is what pip will be using; the host site.py though may contain it's own pattern for where the\n # platform and pure library paths should exist\n\n # notably on Ubuntu there's a patch for getsitepackages to point to\n # - prefix + local/lib/pythonx.y/dist-packages\n # - prefix + lib/pythonx.y/dist-packages\n # while distutils.install.cmd still points both of these to\n # - prefix + lib/python2.7/site-packages\n\n # to facilitate when the two match, or not we first reload the site.py, now triggering the import of host site.py,\n # as this will ensure that initialization code within host site.py runs\n\n here = __file__ # the distutils.install patterns will be injected relative to this site.py, save it here\n reload(sys.modules[\"site\"]) # noqa\n\n # and then if the distutils site packages are not on the sys.path we add them via add_site_dir; note we must add\n # them by invoking add_site_dir to trigger the processing of pth files\n import os\n\n site_packages = r\"\"\"\n ___EXPECTED_SITE_PACKAGES___\n \"\"\"\n import json\n\n add_site_dir = sys.modules[\"site\"].addsitedir\n for path in json.loads(site_packages):\n full_path = os.path.abspath(os.path.join(here, path.encode(\"utf-8\")))\n if full_path not in sys.path:\n add_site_dir(full_path)\n\n\ndef read_pyvenv():\n \"\"\"read pyvenv.cfg\"\"\"\n os_sep = \"\\\\\" if sys.platform == \"win32\" else \"/\" # no os module here yet - poor mans version\n config_file = \"{}{}pyvenv.cfg\".format(sys.prefix, os_sep)\n with open(config_file) as file_handler:\n lines = file_handler.readlines()\n config = {}\n for line in lines:\n try:\n split_at = line.index(\"=\")\n except ValueError:\n continue # ignore bad/empty lines\n else:\n config[line[:split_at].strip()] = line[split_at + 1 :].strip()\n return config\n\n\ndef rewrite_standard_library_sys_path():\n \"\"\"Once this site file is loaded the standard library paths have already been set, fix them up\"\"\"\n sep = \"\\\\\" if sys.platform == \"win32\" else \"/\"\n exe_dir = sys.executable[: sys.executable.rfind(sep)]\n for at, value in enumerate(sys.path):\n # replace old sys prefix path starts with new\n if value == exe_dir:\n pass # don't fix the current executable location, notably on Windows this gets added\n elif value.startswith(exe_dir):\n # content inside the exe folder needs to remap to original executables folder\n orig_exe_folder = sys.base_executable[: sys.base_executable.rfind(sep)]\n value = \"{}{}\".format(orig_exe_folder, value[len(exe_dir) :])\n elif value.startswith(sys.prefix):\n value = \"{}{}\".format(sys.base_prefix, value[len(sys.prefix) :])\n elif value.startswith(sys.exec_prefix):\n value = \"{}{}\".format(sys.base_exec_prefix, value[len(sys.exec_prefix) :])\n sys.path[at] = value\n\n\ndef disable_user_site_package():\n \"\"\"Flip the switch on enable user site package\"\"\"\n # sys.flags is a c-extension type, so we cannot monkey patch it, replace it with a python class to flip it\n sys.original_flags = sys.flags\n\n class Flags(object):\n def __init__(self):\n self.__dict__ = {key: getattr(sys.flags, key) for key in dir(sys.flags) if not key.startswith(\"_\")}\n\n sys.flags = Flags()\n sys.flags.no_user_site = 1\n\n\ndef add_global_site_package():\n \"\"\"add the global site package\"\"\"\n import site\n\n # add user site package\n sys.flags = sys.original_flags # restore original\n site.ENABLE_USER_SITE = None # reset user site check\n # add the global site package to the path - use new prefix and delegate to site.py\n orig_prefixes = None\n try:\n orig_prefixes = site.PREFIXES\n site.PREFIXES = [sys.base_prefix, sys.base_exec_prefix]\n site.main()\n finally:\n site.PREFIXES = orig_prefixes\n\n\nmain()\n", "path": "src/virtualenv/create/via_global_ref/builtin/python2/site.py"}]}
1,912
565
gh_patches_debug_63093
rasdani/github-patches
git_diff
translate__pootle-4270
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- PootleCommand handles obsolete translation projects `update_stores` command can make obsolete TP directory many times. It doesn't affect us much except we get extra unnecessary log messages like this: ``` set(['get_last_updated', 'get_checks', 'get_mtime', 'get_suggestion_count', 'get_last_action', 'get_wordcount_stats']) deleted from /uk/android_announcements_evernote/ cache ``` which tells us that cache for TP has been deleted. I think any `PootleCommand` shouldn't handle TP with obsolete directories. `update_stores` command will resurrect TP directory when it's added. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pootle/apps/pootle_app/management/commands/__init__.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 # 4 # Copyright (C) Pootle contributors. 5 # 6 # This file is a part of the Pootle project. It is distributed under the GPL3 7 # or later license. See the LICENSE file for a copy of the license and the 8 # AUTHORS file for copyright and authorship information. 9 10 import datetime 11 import logging 12 13 from optparse import make_option 14 15 from django.core.management.base import BaseCommand, NoArgsCommand 16 17 from pootle.runner import set_sync_mode 18 from pootle_project.models import Project 19 from pootle_translationproject.models import TranslationProject 20 21 22 class PootleCommand(NoArgsCommand): 23 """Base class for handling recursive pootle store management commands.""" 24 25 shared_option_list = ( 26 make_option( 27 '--project', 28 action='append', 29 dest='projects', 30 help='Project to refresh', 31 ), 32 make_option( 33 '--language', 34 action='append', 35 dest='languages', 36 help='Language to refresh', 37 ), 38 make_option( 39 "--noinput", 40 action="store_true", 41 default=False, 42 help=u"Never prompt for input", 43 ), 44 make_option( 45 "--no-rq", 46 action="store_true", 47 default=False, 48 help=(u"Run all jobs in a single process, without " 49 "using rq workers"), 50 ), 51 ) 52 option_list = NoArgsCommand.option_list + shared_option_list 53 process_disabled_projects = False 54 55 def __init__(self, *args, **kwargs): 56 self.languages = [] 57 self.projects = [] 58 super(PootleCommand, self).__init__(*args, **kwargs) 59 60 def do_translation_project(self, tp, **options): 61 process_stores = True 62 63 if hasattr(self, "handle_translation_project"): 64 logging.info(u"Running %s over %s", self.name, tp) 65 try: 66 process_stores = self.handle_translation_project(tp, **options) 67 except Exception: 68 logging.exception(u"Failed to run %s over %s", self.name, tp) 69 return 70 71 if not process_stores: 72 return 73 74 if hasattr(self, "handle_all_stores"): 75 logging.info(u"Running %s over %s's files", self.name, tp) 76 try: 77 self.handle_all_stores(tp, **options) 78 except Exception: 79 logging.exception(u"Failed to run %s over %s's files", 80 self.name, tp) 81 return 82 elif hasattr(self, "handle_store"): 83 store_query = tp.stores.live() 84 for store in store_query.iterator(): 85 logging.info(u"Running %s over %s", 86 self.name, store.pootle_path) 87 try: 88 self.handle_store(store, **options) 89 except Exception: 90 logging.exception(u"Failed to run %s over %s", 91 self.name, store.pootle_path) 92 93 def handle_noargs(self, **options): 94 # adjust debug level to the verbosity option 95 verbosity = int(options.get('verbosity', 1)) 96 debug_levels = { 97 0: logging.ERROR, 98 1: logging.WARNING, 99 2: logging.INFO, 100 3: logging.DEBUG 101 } 102 debug_level = debug_levels.get(verbosity, logging.DEBUG) 103 logging.getLogger().setLevel(debug_level) 104 105 # reduce size of parse pool early on 106 self.name = self.__class__.__module__.split('.')[-1] 107 from pootle_store.fields import TranslationStoreFieldFile 108 TranslationStoreFieldFile._store_cache.maxsize = 2 109 TranslationStoreFieldFile._store_cache.cullsize = 2 110 TranslationProject._non_db_state_cache.maxsize = 2 111 TranslationProject._non_db_state_cache.cullsize = 2 112 113 self.projects = options.pop('projects', []) 114 self.languages = options.pop('languages', []) 115 116 # info start 117 start = datetime.datetime.now() 118 logging.info('Start running of %s', self.name) 119 120 self.handle_all(**options) 121 122 # info finish 123 end = datetime.datetime.now() 124 logging.info('All done for %s in %s', self.name, end - start) 125 126 def handle_all(self, **options): 127 if options.get("no_rq", False): 128 set_sync_mode(options.get('noinput', False)) 129 130 if self.process_disabled_projects: 131 project_query = Project.objects.all() 132 else: 133 project_query = Project.objects.enabled() 134 135 if self.projects: 136 project_query = project_query.filter(code__in=self.projects) 137 138 for project in project_query.iterator(): 139 tp_query = project.translationproject_set \ 140 .order_by('language__code') 141 142 if self.languages: 143 tp_query = tp_query.filter(language__code__in=self.languages) 144 145 for tp in tp_query.iterator(): 146 self.do_translation_project(tp, **options) 147 148 149 class BaseRunCommand(BaseCommand): 150 """Base class to build new server runners. 151 152 Based on code from `django-shoes 153 <https://bitbucket.org/mlzboy/django-shoes/>`_. 154 """ 155 156 hostport_option_list = ( 157 make_option( 158 '--host', 159 action='store', 160 dest='host', 161 default='127.0.0.1', 162 help='Hostname to listen on.', 163 ), 164 make_option( 165 '--port', 166 action='store', 167 dest='port', 168 default=8000, 169 type=int, 170 help='The TCP port to listen on.', 171 ), 172 ) 173 174 option_list = BaseCommand.option_list + hostport_option_list 175 176 def handle(self, *args, **options): 177 return self.serve_forever(*args, **options) 178 179 def get_app(self): 180 from django.contrib.staticfiles.handlers import StaticFilesHandler 181 from django.core.handlers.wsgi import WSGIHandler 182 183 app = StaticFilesHandler(WSGIHandler()) 184 return app 185 186 def serve_forever(self, *args, **kwargs): 187 raise NotImplementedError 188 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pootle/apps/pootle_app/management/commands/__init__.py b/pootle/apps/pootle_app/management/commands/__init__.py --- a/pootle/apps/pootle_app/management/commands/__init__.py +++ b/pootle/apps/pootle_app/management/commands/__init__.py @@ -136,7 +136,7 @@ project_query = project_query.filter(code__in=self.projects) for project in project_query.iterator(): - tp_query = project.translationproject_set \ + tp_query = project.translationproject_set.live() \ .order_by('language__code') if self.languages:
{"golden_diff": "diff --git a/pootle/apps/pootle_app/management/commands/__init__.py b/pootle/apps/pootle_app/management/commands/__init__.py\n--- a/pootle/apps/pootle_app/management/commands/__init__.py\n+++ b/pootle/apps/pootle_app/management/commands/__init__.py\n@@ -136,7 +136,7 @@\n project_query = project_query.filter(code__in=self.projects)\n \n for project in project_query.iterator():\n- tp_query = project.translationproject_set \\\n+ tp_query = project.translationproject_set.live() \\\n .order_by('language__code')\n \n if self.languages:\n", "issue": "PootleCommand handles obsolete translation projects\n`update_stores` command can make obsolete TP directory many times. \nIt doesn't affect us much except we get extra unnecessary log messages like this:\n\n```\nset(['get_last_updated', 'get_checks', 'get_mtime', 'get_suggestion_count', 'get_last_action', 'get_wordcount_stats']) deleted from /uk/android_announcements_evernote/ cache\n```\n\nwhich tells us that cache for TP has been deleted.\n\nI think any `PootleCommand` shouldn't handle TP with obsolete directories. `update_stores` command will resurrect TP directory when it's added.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport datetime\nimport logging\n\nfrom optparse import make_option\n\nfrom django.core.management.base import BaseCommand, NoArgsCommand\n\nfrom pootle.runner import set_sync_mode\nfrom pootle_project.models import Project\nfrom pootle_translationproject.models import TranslationProject\n\n\nclass PootleCommand(NoArgsCommand):\n \"\"\"Base class for handling recursive pootle store management commands.\"\"\"\n\n shared_option_list = (\n make_option(\n '--project',\n action='append',\n dest='projects',\n help='Project to refresh',\n ),\n make_option(\n '--language',\n action='append',\n dest='languages',\n help='Language to refresh',\n ),\n make_option(\n \"--noinput\",\n action=\"store_true\",\n default=False,\n help=u\"Never prompt for input\",\n ),\n make_option(\n \"--no-rq\",\n action=\"store_true\",\n default=False,\n help=(u\"Run all jobs in a single process, without \"\n \"using rq workers\"),\n ),\n )\n option_list = NoArgsCommand.option_list + shared_option_list\n process_disabled_projects = False\n\n def __init__(self, *args, **kwargs):\n self.languages = []\n self.projects = []\n super(PootleCommand, self).__init__(*args, **kwargs)\n\n def do_translation_project(self, tp, **options):\n process_stores = True\n\n if hasattr(self, \"handle_translation_project\"):\n logging.info(u\"Running %s over %s\", self.name, tp)\n try:\n process_stores = self.handle_translation_project(tp, **options)\n except Exception:\n logging.exception(u\"Failed to run %s over %s\", self.name, tp)\n return\n\n if not process_stores:\n return\n\n if hasattr(self, \"handle_all_stores\"):\n logging.info(u\"Running %s over %s's files\", self.name, tp)\n try:\n self.handle_all_stores(tp, **options)\n except Exception:\n logging.exception(u\"Failed to run %s over %s's files\",\n self.name, tp)\n return\n elif hasattr(self, \"handle_store\"):\n store_query = tp.stores.live()\n for store in store_query.iterator():\n logging.info(u\"Running %s over %s\",\n self.name, store.pootle_path)\n try:\n self.handle_store(store, **options)\n except Exception:\n logging.exception(u\"Failed to run %s over %s\",\n self.name, store.pootle_path)\n\n def handle_noargs(self, **options):\n # adjust debug level to the verbosity option\n verbosity = int(options.get('verbosity', 1))\n debug_levels = {\n 0: logging.ERROR,\n 1: logging.WARNING,\n 2: logging.INFO,\n 3: logging.DEBUG\n }\n debug_level = debug_levels.get(verbosity, logging.DEBUG)\n logging.getLogger().setLevel(debug_level)\n\n # reduce size of parse pool early on\n self.name = self.__class__.__module__.split('.')[-1]\n from pootle_store.fields import TranslationStoreFieldFile\n TranslationStoreFieldFile._store_cache.maxsize = 2\n TranslationStoreFieldFile._store_cache.cullsize = 2\n TranslationProject._non_db_state_cache.maxsize = 2\n TranslationProject._non_db_state_cache.cullsize = 2\n\n self.projects = options.pop('projects', [])\n self.languages = options.pop('languages', [])\n\n # info start\n start = datetime.datetime.now()\n logging.info('Start running of %s', self.name)\n\n self.handle_all(**options)\n\n # info finish\n end = datetime.datetime.now()\n logging.info('All done for %s in %s', self.name, end - start)\n\n def handle_all(self, **options):\n if options.get(\"no_rq\", False):\n set_sync_mode(options.get('noinput', False))\n\n if self.process_disabled_projects:\n project_query = Project.objects.all()\n else:\n project_query = Project.objects.enabled()\n\n if self.projects:\n project_query = project_query.filter(code__in=self.projects)\n\n for project in project_query.iterator():\n tp_query = project.translationproject_set \\\n .order_by('language__code')\n\n if self.languages:\n tp_query = tp_query.filter(language__code__in=self.languages)\n\n for tp in tp_query.iterator():\n self.do_translation_project(tp, **options)\n\n\nclass BaseRunCommand(BaseCommand):\n \"\"\"Base class to build new server runners.\n\n Based on code from `django-shoes\n <https://bitbucket.org/mlzboy/django-shoes/>`_.\n \"\"\"\n\n hostport_option_list = (\n make_option(\n '--host',\n action='store',\n dest='host',\n default='127.0.0.1',\n help='Hostname to listen on.',\n ),\n make_option(\n '--port',\n action='store',\n dest='port',\n default=8000,\n type=int,\n help='The TCP port to listen on.',\n ),\n )\n\n option_list = BaseCommand.option_list + hostport_option_list\n\n def handle(self, *args, **options):\n return self.serve_forever(*args, **options)\n\n def get_app(self):\n from django.contrib.staticfiles.handlers import StaticFilesHandler\n from django.core.handlers.wsgi import WSGIHandler\n\n app = StaticFilesHandler(WSGIHandler())\n return app\n\n def serve_forever(self, *args, **kwargs):\n raise NotImplementedError\n", "path": "pootle/apps/pootle_app/management/commands/__init__.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport datetime\nimport logging\n\nfrom optparse import make_option\n\nfrom django.core.management.base import BaseCommand, NoArgsCommand\n\nfrom pootle.runner import set_sync_mode\nfrom pootle_project.models import Project\nfrom pootle_translationproject.models import TranslationProject\n\n\nclass PootleCommand(NoArgsCommand):\n \"\"\"Base class for handling recursive pootle store management commands.\"\"\"\n\n shared_option_list = (\n make_option(\n '--project',\n action='append',\n dest='projects',\n help='Project to refresh',\n ),\n make_option(\n '--language',\n action='append',\n dest='languages',\n help='Language to refresh',\n ),\n make_option(\n \"--noinput\",\n action=\"store_true\",\n default=False,\n help=u\"Never prompt for input\",\n ),\n make_option(\n \"--no-rq\",\n action=\"store_true\",\n default=False,\n help=(u\"Run all jobs in a single process, without \"\n \"using rq workers\"),\n ),\n )\n option_list = NoArgsCommand.option_list + shared_option_list\n process_disabled_projects = False\n\n def __init__(self, *args, **kwargs):\n self.languages = []\n self.projects = []\n super(PootleCommand, self).__init__(*args, **kwargs)\n\n def do_translation_project(self, tp, **options):\n process_stores = True\n\n if hasattr(self, \"handle_translation_project\"):\n logging.info(u\"Running %s over %s\", self.name, tp)\n try:\n process_stores = self.handle_translation_project(tp, **options)\n except Exception:\n logging.exception(u\"Failed to run %s over %s\", self.name, tp)\n return\n\n if not process_stores:\n return\n\n if hasattr(self, \"handle_all_stores\"):\n logging.info(u\"Running %s over %s's files\", self.name, tp)\n try:\n self.handle_all_stores(tp, **options)\n except Exception:\n logging.exception(u\"Failed to run %s over %s's files\",\n self.name, tp)\n return\n elif hasattr(self, \"handle_store\"):\n store_query = tp.stores.live()\n for store in store_query.iterator():\n logging.info(u\"Running %s over %s\",\n self.name, store.pootle_path)\n try:\n self.handle_store(store, **options)\n except Exception:\n logging.exception(u\"Failed to run %s over %s\",\n self.name, store.pootle_path)\n\n def handle_noargs(self, **options):\n # adjust debug level to the verbosity option\n verbosity = int(options.get('verbosity', 1))\n debug_levels = {\n 0: logging.ERROR,\n 1: logging.WARNING,\n 2: logging.INFO,\n 3: logging.DEBUG\n }\n debug_level = debug_levels.get(verbosity, logging.DEBUG)\n logging.getLogger().setLevel(debug_level)\n\n # reduce size of parse pool early on\n self.name = self.__class__.__module__.split('.')[-1]\n from pootle_store.fields import TranslationStoreFieldFile\n TranslationStoreFieldFile._store_cache.maxsize = 2\n TranslationStoreFieldFile._store_cache.cullsize = 2\n TranslationProject._non_db_state_cache.maxsize = 2\n TranslationProject._non_db_state_cache.cullsize = 2\n\n self.projects = options.pop('projects', [])\n self.languages = options.pop('languages', [])\n\n # info start\n start = datetime.datetime.now()\n logging.info('Start running of %s', self.name)\n\n self.handle_all(**options)\n\n # info finish\n end = datetime.datetime.now()\n logging.info('All done for %s in %s', self.name, end - start)\n\n def handle_all(self, **options):\n if options.get(\"no_rq\", False):\n set_sync_mode(options.get('noinput', False))\n\n if self.process_disabled_projects:\n project_query = Project.objects.all()\n else:\n project_query = Project.objects.enabled()\n\n if self.projects:\n project_query = project_query.filter(code__in=self.projects)\n\n for project in project_query.iterator():\n tp_query = project.translationproject_set.live() \\\n .order_by('language__code')\n\n if self.languages:\n tp_query = tp_query.filter(language__code__in=self.languages)\n\n for tp in tp_query.iterator():\n self.do_translation_project(tp, **options)\n\n\nclass BaseRunCommand(BaseCommand):\n \"\"\"Base class to build new server runners.\n\n Based on code from `django-shoes\n <https://bitbucket.org/mlzboy/django-shoes/>`_.\n \"\"\"\n\n hostport_option_list = (\n make_option(\n '--host',\n action='store',\n dest='host',\n default='127.0.0.1',\n help='Hostname to listen on.',\n ),\n make_option(\n '--port',\n action='store',\n dest='port',\n default=8000,\n type=int,\n help='The TCP port to listen on.',\n ),\n )\n\n option_list = BaseCommand.option_list + hostport_option_list\n\n def handle(self, *args, **options):\n return self.serve_forever(*args, **options)\n\n def get_app(self):\n from django.contrib.staticfiles.handlers import StaticFilesHandler\n from django.core.handlers.wsgi import WSGIHandler\n\n app = StaticFilesHandler(WSGIHandler())\n return app\n\n def serve_forever(self, *args, **kwargs):\n raise NotImplementedError\n", "path": "pootle/apps/pootle_app/management/commands/__init__.py"}]}
2,149
149
gh_patches_debug_22637
rasdani/github-patches
git_diff
CiviWiki__OpenCiviWiki-1463
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add "Following" tab to the user profile view We recently removed some features from the user Profile view and now want to reimplement them using only Django. This task will be to add a "Following" tab to the user profile view. The tab should link to a sub-path of the user profile, so it is clear to the viewer that they are viewing users that a particular user is following. ## Task All of these tasks should be done in the `accounts` app. - [ ] create a Django view called `UserFollowing` (in `accounts/views.py`) - [ ] create a URL definition `profile/<str:username>/following` that renders the `UserFollowing` view ( in `accounts/urls.py`) - [ ] ensure the `UserFollowing` view context contains a list of users that the relevant user is following (given via the `username` in the URL) - note, it should be possible to get profiles that the user follower via `user.profile.following` or just `profile.following` - [ ] create a template to render the followed usernames - for now, extending `base.html` so we have consistent branding (in `accounts/templates/accounts/user_civis.html`) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `project/accounts/views.py` Content: ``` 1 """ 2 Class based views. 3 4 This module will include views for the accounts app. 5 """ 6 7 from accounts.authentication import account_activation_token, send_activation_email 8 from accounts.forms import ProfileEditForm, UserRegistrationForm 9 from accounts.models import Profile 10 from django.conf import settings 11 from django.contrib.auth import get_user_model, login 12 from django.contrib.auth import views as auth_views 13 from django.contrib.auth.decorators import login_required 14 from django.contrib.auth.mixins import LoginRequiredMixin 15 from django.contrib.sites.shortcuts import get_current_site 16 from django.http import HttpResponseRedirect 17 from django.shortcuts import get_object_or_404, redirect 18 from django.template.response import TemplateResponse 19 from django.urls import reverse, reverse_lazy 20 from django.utils.encoding import force_str 21 from django.utils.http import urlsafe_base64_decode 22 from django.views import View 23 from django.views.generic.edit import FormView, UpdateView 24 25 26 class ProfileFollow(LoginRequiredMixin, View): 27 def get(self, request, *args, **kwargs): 28 # Prevent users from following themselves. 29 if request.user.username == kwargs["username"]: 30 pass 31 else: 32 following_profile = Profile.objects.get(user__username=kwargs["username"]) 33 34 self.request.user.profile.following.add(following_profile) 35 36 redirect_to = reverse("profile", kwargs={"username": kwargs["username"]}) 37 38 return HttpResponseRedirect(redirect_to) 39 40 41 class ProfileUnfollow(LoginRequiredMixin, View): 42 def get(self, request, *args, **kwargs): 43 # Prevent users from following themselves. 44 if request.user.username == kwargs["username"]: 45 pass 46 else: 47 following_profile = Profile.objects.get(user__username=kwargs["username"]) 48 49 self.request.user.profile.following.remove(following_profile) 50 51 redirect_to = reverse("profile", kwargs={"username": kwargs["username"]}) 52 53 return HttpResponseRedirect(redirect_to) 54 55 56 class RegisterView(FormView): 57 """ 58 A form view that handles user registration. 59 """ 60 61 template_name = "accounts/register/register.html" 62 form_class = UserRegistrationForm 63 success_url = "/" 64 65 def _create_user(self, form): 66 username = form.cleaned_data["username"] 67 password = form.cleaned_data["password"] 68 email = form.cleaned_data["email"] 69 user = get_user_model().objects.create_user(username, email, password) 70 return user 71 72 def _send_email(self, user): 73 domain = get_current_site(self.request).domain 74 send_activation_email(user, domain) 75 76 def _login(self, user): 77 login(self.request, user) 78 79 def form_valid(self, form): 80 user = self._create_user(form) 81 82 self._send_email(user) 83 self._login(user) 84 85 return super(RegisterView, self).form_valid(form) 86 87 88 class ProfileActivationView(View): 89 """ 90 This shows different views to the user when they are verifying 91 their account based on whether they are already verified or not. 92 """ 93 94 def get(self, request, uidb64, token): 95 96 try: 97 uid = force_str(urlsafe_base64_decode(uidb64)) 98 user = get_user_model().objects.get(pk=uid) 99 100 except (TypeError, ValueError, OverflowError, get_user_model().DoesNotExist): 101 user = None 102 103 redirect_link = {"href": "/", "label": "Back to Main"} 104 105 template_var = { 106 "link": redirect_link, 107 } 108 109 if user is not None and account_activation_token.check_token(user, token): 110 profile = user.profile 111 112 if profile.is_verified: 113 template_var["title"] = "Email Already Verified" 114 template_var["content"] = "You have already verified your email." 115 else: 116 profile.is_verified = True 117 profile.save() 118 119 template_var["title"] = "Email Verification Successful" 120 template_var["content"] = "Thank you for verifying your email." 121 else: 122 # invalid link 123 template_var["title"] = "Email Verification Error" 124 template_var["content"] = "Email could not be verified" 125 126 return TemplateResponse(request, "general_message.html", template_var) 127 128 129 class PasswordResetView(auth_views.PasswordResetView): 130 template_name = "accounts/users/password_reset.html" 131 email_template_name = "accounts/users/password_reset_email.html" 132 subject_template_name = "accounts/users/password_reset_subject.txt" 133 from_email = settings.EMAIL_HOST_USER 134 success_url = reverse_lazy("accounts_password_reset_done") 135 136 137 class PasswordResetDoneView(auth_views.PasswordResetDoneView): 138 template_name = "accounts/users/password_reset_done.html" 139 140 141 class PasswordResetConfirmView(auth_views.PasswordResetConfirmView): 142 template_name = "accounts/users/password_reset_confirm.html" 143 success_url = reverse_lazy("accounts_password_reset_complete") 144 145 146 class PasswordResetCompleteView(auth_views.PasswordResetCompleteView): 147 template_name = "accounts/users/password_reset_complete.html" 148 149 150 class SettingsView(LoginRequiredMixin, UpdateView): 151 """A form view to edit Profile""" 152 153 login_url = "accounts_login" 154 form_class = ProfileEditForm 155 success_url = reverse_lazy("accounts_settings") 156 template_name = "accounts/settings.html" 157 158 def get_object(self, queryset=None): 159 return Profile.objects.get(user=self.request.user) 160 161 def get_initial(self): 162 profile = Profile.objects.get(user=self.request.user) 163 self.initial.update( 164 { 165 "username": profile.user.username, 166 "email": profile.user.email, 167 "first_name": profile.first_name or None, 168 "last_name": profile.last_name or None, 169 "about_me": profile.about_me or None, 170 "profile_image": profile.profile_image or None, 171 } 172 ) 173 return super(SettingsView, self).get_initial() 174 175 176 class UserProfileView(LoginRequiredMixin, View): 177 """A view that shows profile for authorized users""" 178 179 def get(self, request, username=None): 180 profile = get_object_or_404(Profile, user__username=username) 181 182 return TemplateResponse( 183 request, 184 "account.html", 185 { 186 "profile": profile, 187 }, 188 ) 189 190 191 @login_required 192 def expunge_user(request): 193 """ 194 Delete User Information 195 """ 196 197 user_model = get_user_model() 198 user = get_object_or_404(user_model, username=request.user.username) 199 200 profile = get_object_or_404(Profile, user=user) 201 202 # Expunge personally identifiable data in user 203 expunged_user_data = { 204 "is_active": False, 205 "email": "", 206 "first_name": "", 207 "last_name": "", 208 "username": f"expunged-{ user.id }", 209 } 210 user.__dict__.update(expunged_user_data) 211 user.save() 212 213 # Expunge personally identifiable data in profile 214 expunged_profile_data = { 215 "first_name": "", 216 "last_name": "", 217 "about_me": "", 218 } 219 profile.__dict__.update(expunged_profile_data) 220 profile.save() 221 222 return redirect("/") 223 ``` Path: `project/accounts/urls.py` Content: ``` 1 from accounts.views import ( 2 PasswordResetCompleteView, 3 PasswordResetConfirmView, 4 PasswordResetDoneView, 5 PasswordResetView, 6 ProfileActivationView, 7 ProfileFollow, 8 ProfileUnfollow, 9 RegisterView, 10 SettingsView, 11 UserProfileView, 12 expunge_user, 13 ) 14 from django.contrib.auth import views as auth_views 15 from django.urls import path 16 17 urlpatterns = [ 18 path( 19 "login/", 20 auth_views.LoginView.as_view(template_name="accounts/register/login.html"), 21 name="accounts_login", 22 ), 23 path("logout/", auth_views.LogoutView.as_view(), name="accounts_logout"), 24 path("register/", RegisterView.as_view(), name="accounts_register"), 25 path("settings/", SettingsView.as_view(), name="accounts_settings"), 26 path( 27 "activate_account/<uidb64>/<token>/", 28 ProfileActivationView.as_view(), 29 name="accounts_activate", 30 ), 31 path("profile/<str:username>/", UserProfileView.as_view(), name="profile"), 32 path( 33 "profile/<str:username>/follow", ProfileFollow.as_view(), name="profile-follow" 34 ), 35 path( 36 "profile/<str:username>/unfollow", 37 ProfileUnfollow.as_view(), 38 name="profile-unfollow", 39 ), 40 path( 41 "accounts/password_reset/", 42 PasswordResetView.as_view(), 43 name="accounts_password_reset", 44 ), 45 path( 46 "accounts/password_reset_done/", 47 PasswordResetDoneView.as_view(), 48 name="accounts_password_reset_done", 49 ), 50 path( 51 "accounts/password_reset_confirm/<uidb64>/<token>/", 52 PasswordResetConfirmView.as_view(), 53 name="accounts_password_reset_confirm", 54 ), 55 path( 56 "accounts/password_reset_complete/", 57 PasswordResetCompleteView.as_view(), 58 name="accounts_password_reset_complete", 59 ), 60 path("accounts/expunge/", expunge_user, name="expunge_user"), 61 ] 62 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/project/accounts/urls.py b/project/accounts/urls.py --- a/project/accounts/urls.py +++ b/project/accounts/urls.py @@ -8,6 +8,7 @@ ProfileUnfollow, RegisterView, SettingsView, + ProfileFollowing, UserProfileView, expunge_user, ) @@ -37,6 +38,11 @@ ProfileUnfollow.as_view(), name="profile-unfollow", ), + path( + "profile/<str:username>/following", + ProfileFollowing.as_view(), + name="profile-following", + ), path( "accounts/password_reset/", PasswordResetView.as_view(), diff --git a/project/accounts/views.py b/project/accounts/views.py --- a/project/accounts/views.py +++ b/project/accounts/views.py @@ -188,6 +188,24 @@ ) +class ProfileFollowing(LoginRequiredMixin, View): + """ + A view that shows list of profiles + that profile with given username is following + """ + + def get(self, request, username=None): + profile = get_object_or_404(Profile, user__username=username) + + return TemplateResponse( + request, + "profile_following.html", + { + "profile": profile, + }, + ) + + @login_required def expunge_user(request): """
{"golden_diff": "diff --git a/project/accounts/urls.py b/project/accounts/urls.py\n--- a/project/accounts/urls.py\n+++ b/project/accounts/urls.py\n@@ -8,6 +8,7 @@\n ProfileUnfollow,\n RegisterView,\n SettingsView,\n+ ProfileFollowing,\n UserProfileView,\n expunge_user,\n )\n@@ -37,6 +38,11 @@\n ProfileUnfollow.as_view(),\n name=\"profile-unfollow\",\n ),\n+ path(\n+ \"profile/<str:username>/following\",\n+ ProfileFollowing.as_view(),\n+ name=\"profile-following\",\n+ ),\n path(\n \"accounts/password_reset/\",\n PasswordResetView.as_view(),\ndiff --git a/project/accounts/views.py b/project/accounts/views.py\n--- a/project/accounts/views.py\n+++ b/project/accounts/views.py\n@@ -188,6 +188,24 @@\n )\n \n \n+class ProfileFollowing(LoginRequiredMixin, View):\n+ \"\"\"\n+ A view that shows list of profiles\n+ that profile with given username is following\n+ \"\"\"\n+\n+ def get(self, request, username=None):\n+ profile = get_object_or_404(Profile, user__username=username)\n+\n+ return TemplateResponse(\n+ request,\n+ \"profile_following.html\",\n+ {\n+ \"profile\": profile,\n+ },\n+ )\n+\n+\n @login_required\n def expunge_user(request):\n \"\"\"\n", "issue": "Add \"Following\" tab to the user profile view\nWe recently removed some features from the user Profile view and now want to reimplement them using only Django.\r\n\r\nThis task will be to add a \"Following\" tab to the user profile view. The tab should link to a sub-path of the user profile, so it is clear to the viewer that they are viewing users that a particular user is following.\r\n\r\n## Task\r\nAll of these tasks should be done in the `accounts` app.\r\n\r\n- [ ] create a Django view called `UserFollowing` (in `accounts/views.py`)\r\n- [ ] create a URL definition `profile/<str:username>/following` that renders the `UserFollowing` view ( in `accounts/urls.py`)\r\n- [ ] ensure the `UserFollowing` view context contains a list of users that the relevant user is following (given via the `username` in the URL) \r\n - note, it should be possible to get profiles that the user follower via `user.profile.following` or just `profile.following`\r\n- [ ] create a template to render the followed usernames - for now, extending `base.html` so we have consistent branding (in `accounts/templates/accounts/user_civis.html`)\r\n\n", "before_files": [{"content": "\"\"\"\nClass based views.\n\nThis module will include views for the accounts app.\n\"\"\"\n\nfrom accounts.authentication import account_activation_token, send_activation_email\nfrom accounts.forms import ProfileEditForm, UserRegistrationForm\nfrom accounts.models import Profile\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model, login\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse, reverse_lazy\nfrom django.utils.encoding import force_str\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.views import View\nfrom django.views.generic.edit import FormView, UpdateView\n\n\nclass ProfileFollow(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n # Prevent users from following themselves.\n if request.user.username == kwargs[\"username\"]:\n pass\n else:\n following_profile = Profile.objects.get(user__username=kwargs[\"username\"])\n\n self.request.user.profile.following.add(following_profile)\n\n redirect_to = reverse(\"profile\", kwargs={\"username\": kwargs[\"username\"]})\n\n return HttpResponseRedirect(redirect_to)\n\n\nclass ProfileUnfollow(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n # Prevent users from following themselves.\n if request.user.username == kwargs[\"username\"]:\n pass\n else:\n following_profile = Profile.objects.get(user__username=kwargs[\"username\"])\n\n self.request.user.profile.following.remove(following_profile)\n\n redirect_to = reverse(\"profile\", kwargs={\"username\": kwargs[\"username\"]})\n\n return HttpResponseRedirect(redirect_to)\n\n\nclass RegisterView(FormView):\n \"\"\"\n A form view that handles user registration.\n \"\"\"\n\n template_name = \"accounts/register/register.html\"\n form_class = UserRegistrationForm\n success_url = \"/\"\n\n def _create_user(self, form):\n username = form.cleaned_data[\"username\"]\n password = form.cleaned_data[\"password\"]\n email = form.cleaned_data[\"email\"]\n user = get_user_model().objects.create_user(username, email, password)\n return user\n\n def _send_email(self, user):\n domain = get_current_site(self.request).domain\n send_activation_email(user, domain)\n\n def _login(self, user):\n login(self.request, user)\n\n def form_valid(self, form):\n user = self._create_user(form)\n\n self._send_email(user)\n self._login(user)\n\n return super(RegisterView, self).form_valid(form)\n\n\nclass ProfileActivationView(View):\n \"\"\"\n This shows different views to the user when they are verifying\n their account based on whether they are already verified or not.\n \"\"\"\n\n def get(self, request, uidb64, token):\n\n try:\n uid = force_str(urlsafe_base64_decode(uidb64))\n user = get_user_model().objects.get(pk=uid)\n\n except (TypeError, ValueError, OverflowError, get_user_model().DoesNotExist):\n user = None\n\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n\n template_var = {\n \"link\": redirect_link,\n }\n\n if user is not None and account_activation_token.check_token(user, token):\n profile = user.profile\n\n if profile.is_verified:\n template_var[\"title\"] = \"Email Already Verified\"\n template_var[\"content\"] = \"You have already verified your email.\"\n else:\n profile.is_verified = True\n profile.save()\n\n template_var[\"title\"] = \"Email Verification Successful\"\n template_var[\"content\"] = \"Thank you for verifying your email.\"\n else:\n # invalid link\n template_var[\"title\"] = \"Email Verification Error\"\n template_var[\"content\"] = \"Email could not be verified\"\n\n return TemplateResponse(request, \"general_message.html\", template_var)\n\n\nclass PasswordResetView(auth_views.PasswordResetView):\n template_name = \"accounts/users/password_reset.html\"\n email_template_name = \"accounts/users/password_reset_email.html\"\n subject_template_name = \"accounts/users/password_reset_subject.txt\"\n from_email = settings.EMAIL_HOST_USER\n success_url = reverse_lazy(\"accounts_password_reset_done\")\n\n\nclass PasswordResetDoneView(auth_views.PasswordResetDoneView):\n template_name = \"accounts/users/password_reset_done.html\"\n\n\nclass PasswordResetConfirmView(auth_views.PasswordResetConfirmView):\n template_name = \"accounts/users/password_reset_confirm.html\"\n success_url = reverse_lazy(\"accounts_password_reset_complete\")\n\n\nclass PasswordResetCompleteView(auth_views.PasswordResetCompleteView):\n template_name = \"accounts/users/password_reset_complete.html\"\n\n\nclass SettingsView(LoginRequiredMixin, UpdateView):\n \"\"\"A form view to edit Profile\"\"\"\n\n login_url = \"accounts_login\"\n form_class = ProfileEditForm\n success_url = reverse_lazy(\"accounts_settings\")\n template_name = \"accounts/settings.html\"\n\n def get_object(self, queryset=None):\n return Profile.objects.get(user=self.request.user)\n\n def get_initial(self):\n profile = Profile.objects.get(user=self.request.user)\n self.initial.update(\n {\n \"username\": profile.user.username,\n \"email\": profile.user.email,\n \"first_name\": profile.first_name or None,\n \"last_name\": profile.last_name or None,\n \"about_me\": profile.about_me or None,\n \"profile_image\": profile.profile_image or None,\n }\n )\n return super(SettingsView, self).get_initial()\n\n\nclass UserProfileView(LoginRequiredMixin, View):\n \"\"\"A view that shows profile for authorized users\"\"\"\n\n def get(self, request, username=None):\n profile = get_object_or_404(Profile, user__username=username)\n\n return TemplateResponse(\n request,\n \"account.html\",\n {\n \"profile\": profile,\n },\n )\n\n\n@login_required\ndef expunge_user(request):\n \"\"\"\n Delete User Information\n \"\"\"\n\n user_model = get_user_model()\n user = get_object_or_404(user_model, username=request.user.username)\n\n profile = get_object_or_404(Profile, user=user)\n\n # Expunge personally identifiable data in user\n expunged_user_data = {\n \"is_active\": False,\n \"email\": \"\",\n \"first_name\": \"\",\n \"last_name\": \"\",\n \"username\": f\"expunged-{ user.id }\",\n }\n user.__dict__.update(expunged_user_data)\n user.save()\n\n # Expunge personally identifiable data in profile\n expunged_profile_data = {\n \"first_name\": \"\",\n \"last_name\": \"\",\n \"about_me\": \"\",\n }\n profile.__dict__.update(expunged_profile_data)\n profile.save()\n\n return redirect(\"/\")\n", "path": "project/accounts/views.py"}, {"content": "from accounts.views import (\n PasswordResetCompleteView,\n PasswordResetConfirmView,\n PasswordResetDoneView,\n PasswordResetView,\n ProfileActivationView,\n ProfileFollow,\n ProfileUnfollow,\n RegisterView,\n SettingsView,\n UserProfileView,\n expunge_user,\n)\nfrom django.contrib.auth import views as auth_views\nfrom django.urls import path\n\nurlpatterns = [\n path(\n \"login/\",\n auth_views.LoginView.as_view(template_name=\"accounts/register/login.html\"),\n name=\"accounts_login\",\n ),\n path(\"logout/\", auth_views.LogoutView.as_view(), name=\"accounts_logout\"),\n path(\"register/\", RegisterView.as_view(), name=\"accounts_register\"),\n path(\"settings/\", SettingsView.as_view(), name=\"accounts_settings\"),\n path(\n \"activate_account/<uidb64>/<token>/\",\n ProfileActivationView.as_view(),\n name=\"accounts_activate\",\n ),\n path(\"profile/<str:username>/\", UserProfileView.as_view(), name=\"profile\"),\n path(\n \"profile/<str:username>/follow\", ProfileFollow.as_view(), name=\"profile-follow\"\n ),\n path(\n \"profile/<str:username>/unfollow\",\n ProfileUnfollow.as_view(),\n name=\"profile-unfollow\",\n ),\n path(\n \"accounts/password_reset/\",\n PasswordResetView.as_view(),\n name=\"accounts_password_reset\",\n ),\n path(\n \"accounts/password_reset_done/\",\n PasswordResetDoneView.as_view(),\n name=\"accounts_password_reset_done\",\n ),\n path(\n \"accounts/password_reset_confirm/<uidb64>/<token>/\",\n PasswordResetConfirmView.as_view(),\n name=\"accounts_password_reset_confirm\",\n ),\n path(\n \"accounts/password_reset_complete/\",\n PasswordResetCompleteView.as_view(),\n name=\"accounts_password_reset_complete\",\n ),\n path(\"accounts/expunge/\", expunge_user, name=\"expunge_user\"),\n]\n", "path": "project/accounts/urls.py"}], "after_files": [{"content": "\"\"\"\nClass based views.\n\nThis module will include views for the accounts app.\n\"\"\"\n\nfrom accounts.authentication import account_activation_token, send_activation_email\nfrom accounts.forms import ProfileEditForm, UserRegistrationForm\nfrom accounts.models import Profile\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model, login\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse, reverse_lazy\nfrom django.utils.encoding import force_str\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.views import View\nfrom django.views.generic.edit import FormView, UpdateView\n\n\nclass ProfileFollow(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n # Prevent users from following themselves.\n if request.user.username == kwargs[\"username\"]:\n pass\n else:\n following_profile = Profile.objects.get(user__username=kwargs[\"username\"])\n\n self.request.user.profile.following.add(following_profile)\n\n redirect_to = reverse(\"profile\", kwargs={\"username\": kwargs[\"username\"]})\n\n return HttpResponseRedirect(redirect_to)\n\n\nclass ProfileUnfollow(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n # Prevent users from following themselves.\n if request.user.username == kwargs[\"username\"]:\n pass\n else:\n following_profile = Profile.objects.get(user__username=kwargs[\"username\"])\n\n self.request.user.profile.following.remove(following_profile)\n\n redirect_to = reverse(\"profile\", kwargs={\"username\": kwargs[\"username\"]})\n\n return HttpResponseRedirect(redirect_to)\n\n\nclass RegisterView(FormView):\n \"\"\"\n A form view that handles user registration.\n \"\"\"\n\n template_name = \"accounts/register/register.html\"\n form_class = UserRegistrationForm\n success_url = \"/\"\n\n def _create_user(self, form):\n username = form.cleaned_data[\"username\"]\n password = form.cleaned_data[\"password\"]\n email = form.cleaned_data[\"email\"]\n user = get_user_model().objects.create_user(username, email, password)\n return user\n\n def _send_email(self, user):\n domain = get_current_site(self.request).domain\n send_activation_email(user, domain)\n\n def _login(self, user):\n login(self.request, user)\n\n def form_valid(self, form):\n user = self._create_user(form)\n\n self._send_email(user)\n self._login(user)\n\n return super(RegisterView, self).form_valid(form)\n\n\nclass ProfileActivationView(View):\n \"\"\"\n This shows different views to the user when they are verifying\n their account based on whether they are already verified or not.\n \"\"\"\n\n def get(self, request, uidb64, token):\n\n try:\n uid = force_str(urlsafe_base64_decode(uidb64))\n user = get_user_model().objects.get(pk=uid)\n\n except (TypeError, ValueError, OverflowError, get_user_model().DoesNotExist):\n user = None\n\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n\n template_var = {\n \"link\": redirect_link,\n }\n\n if user is not None and account_activation_token.check_token(user, token):\n profile = user.profile\n\n if profile.is_verified:\n template_var[\"title\"] = \"Email Already Verified\"\n template_var[\"content\"] = \"You have already verified your email.\"\n else:\n profile.is_verified = True\n profile.save()\n\n template_var[\"title\"] = \"Email Verification Successful\"\n template_var[\"content\"] = \"Thank you for verifying your email.\"\n else:\n # invalid link\n template_var[\"title\"] = \"Email Verification Error\"\n template_var[\"content\"] = \"Email could not be verified\"\n\n return TemplateResponse(request, \"general_message.html\", template_var)\n\n\nclass PasswordResetView(auth_views.PasswordResetView):\n template_name = \"accounts/users/password_reset.html\"\n email_template_name = \"accounts/users/password_reset_email.html\"\n subject_template_name = \"accounts/users/password_reset_subject.txt\"\n from_email = settings.EMAIL_HOST_USER\n success_url = reverse_lazy(\"accounts_password_reset_done\")\n\n\nclass PasswordResetDoneView(auth_views.PasswordResetDoneView):\n template_name = \"accounts/users/password_reset_done.html\"\n\n\nclass PasswordResetConfirmView(auth_views.PasswordResetConfirmView):\n template_name = \"accounts/users/password_reset_confirm.html\"\n success_url = reverse_lazy(\"accounts_password_reset_complete\")\n\n\nclass PasswordResetCompleteView(auth_views.PasswordResetCompleteView):\n template_name = \"accounts/users/password_reset_complete.html\"\n\n\nclass SettingsView(LoginRequiredMixin, UpdateView):\n \"\"\"A form view to edit Profile\"\"\"\n\n login_url = \"accounts_login\"\n form_class = ProfileEditForm\n success_url = reverse_lazy(\"accounts_settings\")\n template_name = \"accounts/settings.html\"\n\n def get_object(self, queryset=None):\n return Profile.objects.get(user=self.request.user)\n\n def get_initial(self):\n profile = Profile.objects.get(user=self.request.user)\n self.initial.update(\n {\n \"username\": profile.user.username,\n \"email\": profile.user.email,\n \"first_name\": profile.first_name or None,\n \"last_name\": profile.last_name or None,\n \"about_me\": profile.about_me or None,\n \"profile_image\": profile.profile_image or None,\n }\n )\n return super(SettingsView, self).get_initial()\n\n\nclass UserProfileView(LoginRequiredMixin, View):\n \"\"\"A view that shows profile for authorized users\"\"\"\n\n def get(self, request, username=None):\n profile = get_object_or_404(Profile, user__username=username)\n\n return TemplateResponse(\n request,\n \"account.html\",\n {\n \"profile\": profile,\n },\n )\n\n\nclass ProfileFollowing(LoginRequiredMixin, View):\n \"\"\"\n A view that shows list of profiles\n that profile with given username is following\n \"\"\"\n\n def get(self, request, username=None):\n profile = get_object_or_404(Profile, user__username=username)\n\n return TemplateResponse(\n request,\n \"profile_following.html\",\n {\n \"profile\": profile,\n },\n )\n\n\n@login_required\ndef expunge_user(request):\n \"\"\"\n Delete User Information\n \"\"\"\n\n user_model = get_user_model()\n user = get_object_or_404(user_model, username=request.user.username)\n\n profile = get_object_or_404(Profile, user=user)\n\n # Expunge personally identifiable data in user\n expunged_user_data = {\n \"is_active\": False,\n \"email\": \"\",\n \"first_name\": \"\",\n \"last_name\": \"\",\n \"username\": f\"expunged-{ user.id }\",\n }\n user.__dict__.update(expunged_user_data)\n user.save()\n\n # Expunge personally identifiable data in profile\n expunged_profile_data = {\n \"first_name\": \"\",\n \"last_name\": \"\",\n \"about_me\": \"\",\n }\n profile.__dict__.update(expunged_profile_data)\n profile.save()\n\n return redirect(\"/\")\n", "path": "project/accounts/views.py"}, {"content": "from accounts.views import (\n PasswordResetCompleteView,\n PasswordResetConfirmView,\n PasswordResetDoneView,\n PasswordResetView,\n ProfileActivationView,\n ProfileFollow,\n ProfileUnfollow,\n RegisterView,\n SettingsView,\n ProfileFollowing,\n UserProfileView,\n expunge_user,\n)\nfrom django.contrib.auth import views as auth_views\nfrom django.urls import path\n\nurlpatterns = [\n path(\n \"login/\",\n auth_views.LoginView.as_view(template_name=\"accounts/register/login.html\"),\n name=\"accounts_login\",\n ),\n path(\"logout/\", auth_views.LogoutView.as_view(), name=\"accounts_logout\"),\n path(\"register/\", RegisterView.as_view(), name=\"accounts_register\"),\n path(\"settings/\", SettingsView.as_view(), name=\"accounts_settings\"),\n path(\n \"activate_account/<uidb64>/<token>/\",\n ProfileActivationView.as_view(),\n name=\"accounts_activate\",\n ),\n path(\"profile/<str:username>/\", UserProfileView.as_view(), name=\"profile\"),\n path(\n \"profile/<str:username>/follow\", ProfileFollow.as_view(), name=\"profile-follow\"\n ),\n path(\n \"profile/<str:username>/unfollow\",\n ProfileUnfollow.as_view(),\n name=\"profile-unfollow\",\n ),\n path(\n \"profile/<str:username>/following\",\n ProfileFollowing.as_view(),\n name=\"profile-following\",\n ),\n path(\n \"accounts/password_reset/\",\n PasswordResetView.as_view(),\n name=\"accounts_password_reset\",\n ),\n path(\n \"accounts/password_reset_done/\",\n PasswordResetDoneView.as_view(),\n name=\"accounts_password_reset_done\",\n ),\n path(\n \"accounts/password_reset_confirm/<uidb64>/<token>/\",\n PasswordResetConfirmView.as_view(),\n name=\"accounts_password_reset_confirm\",\n ),\n path(\n \"accounts/password_reset_complete/\",\n PasswordResetCompleteView.as_view(),\n name=\"accounts_password_reset_complete\",\n ),\n path(\"accounts/expunge/\", expunge_user, name=\"expunge_user\"),\n]\n", "path": "project/accounts/urls.py"}]}
3,078
308
gh_patches_debug_19966
rasdani/github-patches
git_diff
freedomofpress__securedrop-5116
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update UI/docs consistent with "New Identity" behavior change in Tor Browser 9.0 The "New Identity" behavior has changed once again in Tor Browser 9.0. There's now a dedicated button for it: ![New identity button](https://blog.torproject.org/sites/default/files/inline-images/toolbar%20update.png) So we have to update these instructions: > Thank you for exiting your session! Please select "New Identity" from the onion button in the Tor browser's toolbar to clear all history of your SecureDrop usage from this device. And also the relevant portion in the [source guide](https://docs.securedrop.org/en/release-1.1.0/source.html). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `securedrop/source_app/main.py` Content: ``` 1 import operator 2 import os 3 import io 4 5 from base64 import urlsafe_b64encode 6 from datetime import datetime 7 from flask import (Blueprint, render_template, flash, redirect, url_for, g, 8 session, current_app, request, Markup, abort) 9 from flask_babel import gettext 10 from sqlalchemy.exc import IntegrityError 11 12 import store 13 14 from db import db 15 from models import Source, Submission, Reply, get_one_or_else 16 from source_app.decorators import login_required 17 from source_app.utils import (logged_in, generate_unique_codename, 18 async_genkey, normalize_timestamps, 19 valid_codename, get_entropy_estimate) 20 from source_app.forms import LoginForm 21 22 23 def make_blueprint(config): 24 view = Blueprint('main', __name__) 25 26 @view.route('/') 27 def index(): 28 return render_template('index.html') 29 30 @view.route('/generate', methods=('GET', 'POST')) 31 def generate(): 32 if logged_in(): 33 flash(gettext( 34 "You were redirected because you are already logged in. " 35 "If you want to create a new account, you should log out " 36 "first."), 37 "notification") 38 return redirect(url_for('.lookup')) 39 40 codename = generate_unique_codename(config) 41 42 # Generate a unique id for each browser tab and associate the codename with this id. 43 # This will allow retrieval of the codename displayed in the tab from which the source has 44 # clicked to proceed to /generate (ref. issue #4458) 45 tab_id = urlsafe_b64encode(os.urandom(64)).decode() 46 codenames = session.get('codenames', {}) 47 codenames[tab_id] = codename 48 session['codenames'] = codenames 49 50 session['new_user'] = True 51 return render_template('generate.html', codename=codename, tab_id=tab_id) 52 53 @view.route('/org-logo') 54 def select_logo(): 55 if os.path.exists(os.path.join(current_app.static_folder, 'i', 56 'custom_logo.png')): 57 return redirect(url_for('static', filename='i/custom_logo.png')) 58 else: 59 return redirect(url_for('static', filename='i/logo.png')) 60 61 @view.route('/create', methods=['POST']) 62 def create(): 63 if session.get('logged_in', False): 64 flash(gettext("You are already logged in. Please verify your codename below as it " + 65 "may differ from the one displayed on the previous page."), 66 'notification') 67 else: 68 tab_id = request.form['tab_id'] 69 codename = session['codenames'][tab_id] 70 session['codename'] = codename 71 72 del session['codenames'] 73 74 filesystem_id = current_app.crypto_util.hash_codename(codename) 75 76 source = Source(filesystem_id, current_app.crypto_util.display_id()) 77 db.session.add(source) 78 try: 79 db.session.commit() 80 except IntegrityError as e: 81 db.session.rollback() 82 current_app.logger.error( 83 "Attempt to create a source with duplicate codename: %s" % 84 (e,)) 85 86 # Issue 2386: don't log in on duplicates 87 del session['codename'] 88 89 # Issue 4361: Delete 'logged_in' if it's in the session 90 try: 91 del session['logged_in'] 92 except KeyError: 93 pass 94 95 abort(500) 96 else: 97 os.mkdir(current_app.storage.path(filesystem_id)) 98 99 session['logged_in'] = True 100 return redirect(url_for('.lookup')) 101 102 @view.route('/lookup', methods=('GET',)) 103 @login_required 104 def lookup(): 105 replies = [] 106 source_inbox = Reply.query.filter(Reply.source_id == g.source.id) \ 107 .filter(Reply.deleted_by_source == False).all() # noqa 108 109 for reply in source_inbox: 110 reply_path = current_app.storage.path( 111 g.filesystem_id, 112 reply.filename, 113 ) 114 try: 115 with io.open(reply_path, "rb") as f: 116 contents = f.read() 117 reply_obj = current_app.crypto_util.decrypt(g.codename, contents) 118 reply.decrypted = reply_obj 119 except UnicodeDecodeError: 120 current_app.logger.error("Could not decode reply %s" % 121 reply.filename) 122 else: 123 reply.date = datetime.utcfromtimestamp( 124 os.stat(reply_path).st_mtime) 125 replies.append(reply) 126 127 # Sort the replies by date 128 replies.sort(key=operator.attrgetter('date'), reverse=True) 129 130 # Generate a keypair to encrypt replies from the journalist 131 # Only do this if the journalist has flagged the source as one 132 # that they would like to reply to. (Issue #140.) 133 if not current_app.crypto_util.get_fingerprint(g.filesystem_id) and \ 134 g.source.flagged: 135 db_uri = current_app.config['SQLALCHEMY_DATABASE_URI'] 136 async_genkey(current_app.crypto_util, 137 db_uri, 138 g.filesystem_id, 139 g.codename) 140 141 return render_template( 142 'lookup.html', 143 allow_document_uploads=current_app.instance_config.allow_document_uploads, 144 codename=g.codename, 145 replies=replies, 146 flagged=g.source.flagged, 147 new_user=session.get('new_user', None), 148 haskey=current_app.crypto_util.get_fingerprint( 149 g.filesystem_id)) 150 151 @view.route('/submit', methods=('POST',)) 152 @login_required 153 def submit(): 154 allow_document_uploads = current_app.instance_config.allow_document_uploads 155 msg = request.form['msg'] 156 fh = None 157 if allow_document_uploads and 'fh' in request.files: 158 fh = request.files['fh'] 159 160 # Don't submit anything if it was an "empty" submission. #878 161 if not (msg or fh): 162 if allow_document_uploads: 163 flash(gettext( 164 "You must enter a message or choose a file to submit."), 165 "error") 166 else: 167 flash(gettext("You must enter a message."), "error") 168 return redirect(url_for('main.lookup')) 169 170 fnames = [] 171 journalist_filename = g.source.journalist_filename 172 first_submission = g.source.interaction_count == 0 173 174 if msg: 175 g.source.interaction_count += 1 176 fnames.append( 177 current_app.storage.save_message_submission( 178 g.filesystem_id, 179 g.source.interaction_count, 180 journalist_filename, 181 msg)) 182 if fh: 183 g.source.interaction_count += 1 184 fnames.append( 185 current_app.storage.save_file_submission( 186 g.filesystem_id, 187 g.source.interaction_count, 188 journalist_filename, 189 fh.filename, 190 fh.stream)) 191 192 if first_submission: 193 msg = render_template('first_submission_flashed_message.html') 194 flash(Markup(msg), "success") 195 196 else: 197 if msg and not fh: 198 html_contents = gettext('Thanks! We received your message.') 199 elif not msg and fh: 200 html_contents = gettext('Thanks! We received your document.') 201 else: 202 html_contents = gettext('Thanks! We received your message and ' 203 'document.') 204 205 msg = render_template('next_submission_flashed_message.html', 206 html_contents=html_contents) 207 flash(Markup(msg), "success") 208 209 new_submissions = [] 210 for fname in fnames: 211 submission = Submission(g.source, fname) 212 db.session.add(submission) 213 new_submissions.append(submission) 214 215 if g.source.pending: 216 g.source.pending = False 217 218 # Generate a keypair now, if there's enough entropy (issue #303) 219 # (gpg reads 300 bytes from /dev/random) 220 entropy_avail = get_entropy_estimate() 221 if entropy_avail >= 2400: 222 db_uri = current_app.config['SQLALCHEMY_DATABASE_URI'] 223 224 async_genkey(current_app.crypto_util, 225 db_uri, 226 g.filesystem_id, 227 g.codename) 228 current_app.logger.info("generating key, entropy: {}".format( 229 entropy_avail)) 230 else: 231 current_app.logger.warn( 232 "skipping key generation. entropy: {}".format( 233 entropy_avail)) 234 235 g.source.last_updated = datetime.utcnow() 236 db.session.commit() 237 238 for sub in new_submissions: 239 store.async_add_checksum_for_file(sub) 240 241 normalize_timestamps(g.filesystem_id) 242 243 return redirect(url_for('main.lookup')) 244 245 @view.route('/delete', methods=('POST',)) 246 @login_required 247 def delete(): 248 """This deletes the reply from the source's inbox, but preserves 249 the history for journalists such that they can view conversation 250 history. 251 """ 252 253 query = Reply.query.filter_by( 254 filename=request.form['reply_filename'], 255 source_id=g.source.id) 256 reply = get_one_or_else(query, current_app.logger, abort) 257 reply.deleted_by_source = True 258 db.session.add(reply) 259 db.session.commit() 260 261 flash(gettext("Reply deleted"), "notification") 262 return redirect(url_for('.lookup')) 263 264 @view.route('/delete-all', methods=('POST',)) 265 @login_required 266 def batch_delete(): 267 replies = Reply.query.filter(Reply.source_id == g.source.id) \ 268 .filter(Reply.deleted_by_source == False).all() # noqa 269 if len(replies) == 0: 270 current_app.logger.error("Found no replies when at least one was " 271 "expected") 272 return redirect(url_for('.lookup')) 273 274 for reply in replies: 275 reply.deleted_by_source = True 276 db.session.add(reply) 277 db.session.commit() 278 279 flash(gettext("All replies have been deleted"), "notification") 280 return redirect(url_for('.lookup')) 281 282 @view.route('/login', methods=('GET', 'POST')) 283 def login(): 284 form = LoginForm() 285 if form.validate_on_submit(): 286 codename = request.form['codename'].strip() 287 if valid_codename(codename): 288 session.update(codename=codename, logged_in=True) 289 return redirect(url_for('.lookup', from_login='1')) 290 else: 291 current_app.logger.info( 292 "Login failed for invalid codename") 293 flash(gettext("Sorry, that is not a recognized codename."), 294 "error") 295 return render_template('login.html', form=form) 296 297 @view.route('/logout') 298 def logout(): 299 if logged_in(): 300 msg = render_template('logout_flashed_message.html') 301 302 # Clear the session after we render the message so it's localized 303 # If a user specified a locale, save it and restore it 304 user_locale = g.locale 305 session.clear() 306 session['locale'] = user_locale 307 308 flash(Markup(msg), "important hide-if-not-tor-browser") 309 return redirect(url_for('.index')) 310 311 return view 312 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/securedrop/source_app/main.py b/securedrop/source_app/main.py --- a/securedrop/source_app/main.py +++ b/securedrop/source_app/main.py @@ -296,8 +296,12 @@ @view.route('/logout') def logout(): + """ + If a user is logged in, show them a logout page that prompts them to + click the New Identity button in Tor Browser to complete their session. + Otherwise redirect to the main Source Interface page. + """ if logged_in(): - msg = render_template('logout_flashed_message.html') # Clear the session after we render the message so it's localized # If a user specified a locale, save it and restore it @@ -305,7 +309,8 @@ session.clear() session['locale'] = user_locale - flash(Markup(msg), "important hide-if-not-tor-browser") - return redirect(url_for('.index')) + return render_template('logout.html') + else: + return redirect(url_for('.index')) return view
{"golden_diff": "diff --git a/securedrop/source_app/main.py b/securedrop/source_app/main.py\n--- a/securedrop/source_app/main.py\n+++ b/securedrop/source_app/main.py\n@@ -296,8 +296,12 @@\n \n @view.route('/logout')\n def logout():\n+ \"\"\"\n+ If a user is logged in, show them a logout page that prompts them to\n+ click the New Identity button in Tor Browser to complete their session.\n+ Otherwise redirect to the main Source Interface page.\n+ \"\"\"\n if logged_in():\n- msg = render_template('logout_flashed_message.html')\n \n # Clear the session after we render the message so it's localized\n # If a user specified a locale, save it and restore it\n@@ -305,7 +309,8 @@\n session.clear()\n session['locale'] = user_locale\n \n- flash(Markup(msg), \"important hide-if-not-tor-browser\")\n- return redirect(url_for('.index'))\n+ return render_template('logout.html')\n+ else:\n+ return redirect(url_for('.index'))\n \n return view\n", "issue": "Update UI/docs consistent with \"New Identity\" behavior change in Tor Browser 9.0\nThe \"New Identity\" behavior has changed once again in Tor Browser 9.0. There's now a dedicated button for it:\r\n![New identity button](https://blog.torproject.org/sites/default/files/inline-images/toolbar%20update.png)\r\n\r\nSo we have to update these instructions:\r\n\r\n> Thank you for exiting your session! Please select \"New Identity\" from the onion button in the Tor browser's toolbar to clear all history of your SecureDrop usage from this device.\r\n\r\nAnd also the relevant portion in the [source guide](https://docs.securedrop.org/en/release-1.1.0/source.html).\n", "before_files": [{"content": "import operator\nimport os\nimport io\n\nfrom base64 import urlsafe_b64encode\nfrom datetime import datetime\nfrom flask import (Blueprint, render_template, flash, redirect, url_for, g,\n session, current_app, request, Markup, abort)\nfrom flask_babel import gettext\nfrom sqlalchemy.exc import IntegrityError\n\nimport store\n\nfrom db import db\nfrom models import Source, Submission, Reply, get_one_or_else\nfrom source_app.decorators import login_required\nfrom source_app.utils import (logged_in, generate_unique_codename,\n async_genkey, normalize_timestamps,\n valid_codename, get_entropy_estimate)\nfrom source_app.forms import LoginForm\n\n\ndef make_blueprint(config):\n view = Blueprint('main', __name__)\n\n @view.route('/')\n def index():\n return render_template('index.html')\n\n @view.route('/generate', methods=('GET', 'POST'))\n def generate():\n if logged_in():\n flash(gettext(\n \"You were redirected because you are already logged in. \"\n \"If you want to create a new account, you should log out \"\n \"first.\"),\n \"notification\")\n return redirect(url_for('.lookup'))\n\n codename = generate_unique_codename(config)\n\n # Generate a unique id for each browser tab and associate the codename with this id.\n # This will allow retrieval of the codename displayed in the tab from which the source has\n # clicked to proceed to /generate (ref. issue #4458)\n tab_id = urlsafe_b64encode(os.urandom(64)).decode()\n codenames = session.get('codenames', {})\n codenames[tab_id] = codename\n session['codenames'] = codenames\n\n session['new_user'] = True\n return render_template('generate.html', codename=codename, tab_id=tab_id)\n\n @view.route('/org-logo')\n def select_logo():\n if os.path.exists(os.path.join(current_app.static_folder, 'i',\n 'custom_logo.png')):\n return redirect(url_for('static', filename='i/custom_logo.png'))\n else:\n return redirect(url_for('static', filename='i/logo.png'))\n\n @view.route('/create', methods=['POST'])\n def create():\n if session.get('logged_in', False):\n flash(gettext(\"You are already logged in. Please verify your codename below as it \" +\n \"may differ from the one displayed on the previous page.\"),\n 'notification')\n else:\n tab_id = request.form['tab_id']\n codename = session['codenames'][tab_id]\n session['codename'] = codename\n\n del session['codenames']\n\n filesystem_id = current_app.crypto_util.hash_codename(codename)\n\n source = Source(filesystem_id, current_app.crypto_util.display_id())\n db.session.add(source)\n try:\n db.session.commit()\n except IntegrityError as e:\n db.session.rollback()\n current_app.logger.error(\n \"Attempt to create a source with duplicate codename: %s\" %\n (e,))\n\n # Issue 2386: don't log in on duplicates\n del session['codename']\n\n # Issue 4361: Delete 'logged_in' if it's in the session\n try:\n del session['logged_in']\n except KeyError:\n pass\n\n abort(500)\n else:\n os.mkdir(current_app.storage.path(filesystem_id))\n\n session['logged_in'] = True\n return redirect(url_for('.lookup'))\n\n @view.route('/lookup', methods=('GET',))\n @login_required\n def lookup():\n replies = []\n source_inbox = Reply.query.filter(Reply.source_id == g.source.id) \\\n .filter(Reply.deleted_by_source == False).all() # noqa\n\n for reply in source_inbox:\n reply_path = current_app.storage.path(\n g.filesystem_id,\n reply.filename,\n )\n try:\n with io.open(reply_path, \"rb\") as f:\n contents = f.read()\n reply_obj = current_app.crypto_util.decrypt(g.codename, contents)\n reply.decrypted = reply_obj\n except UnicodeDecodeError:\n current_app.logger.error(\"Could not decode reply %s\" %\n reply.filename)\n else:\n reply.date = datetime.utcfromtimestamp(\n os.stat(reply_path).st_mtime)\n replies.append(reply)\n\n # Sort the replies by date\n replies.sort(key=operator.attrgetter('date'), reverse=True)\n\n # Generate a keypair to encrypt replies from the journalist\n # Only do this if the journalist has flagged the source as one\n # that they would like to reply to. (Issue #140.)\n if not current_app.crypto_util.get_fingerprint(g.filesystem_id) and \\\n g.source.flagged:\n db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']\n async_genkey(current_app.crypto_util,\n db_uri,\n g.filesystem_id,\n g.codename)\n\n return render_template(\n 'lookup.html',\n allow_document_uploads=current_app.instance_config.allow_document_uploads,\n codename=g.codename,\n replies=replies,\n flagged=g.source.flagged,\n new_user=session.get('new_user', None),\n haskey=current_app.crypto_util.get_fingerprint(\n g.filesystem_id))\n\n @view.route('/submit', methods=('POST',))\n @login_required\n def submit():\n allow_document_uploads = current_app.instance_config.allow_document_uploads\n msg = request.form['msg']\n fh = None\n if allow_document_uploads and 'fh' in request.files:\n fh = request.files['fh']\n\n # Don't submit anything if it was an \"empty\" submission. #878\n if not (msg or fh):\n if allow_document_uploads:\n flash(gettext(\n \"You must enter a message or choose a file to submit.\"),\n \"error\")\n else:\n flash(gettext(\"You must enter a message.\"), \"error\")\n return redirect(url_for('main.lookup'))\n\n fnames = []\n journalist_filename = g.source.journalist_filename\n first_submission = g.source.interaction_count == 0\n\n if msg:\n g.source.interaction_count += 1\n fnames.append(\n current_app.storage.save_message_submission(\n g.filesystem_id,\n g.source.interaction_count,\n journalist_filename,\n msg))\n if fh:\n g.source.interaction_count += 1\n fnames.append(\n current_app.storage.save_file_submission(\n g.filesystem_id,\n g.source.interaction_count,\n journalist_filename,\n fh.filename,\n fh.stream))\n\n if first_submission:\n msg = render_template('first_submission_flashed_message.html')\n flash(Markup(msg), \"success\")\n\n else:\n if msg and not fh:\n html_contents = gettext('Thanks! We received your message.')\n elif not msg and fh:\n html_contents = gettext('Thanks! We received your document.')\n else:\n html_contents = gettext('Thanks! We received your message and '\n 'document.')\n\n msg = render_template('next_submission_flashed_message.html',\n html_contents=html_contents)\n flash(Markup(msg), \"success\")\n\n new_submissions = []\n for fname in fnames:\n submission = Submission(g.source, fname)\n db.session.add(submission)\n new_submissions.append(submission)\n\n if g.source.pending:\n g.source.pending = False\n\n # Generate a keypair now, if there's enough entropy (issue #303)\n # (gpg reads 300 bytes from /dev/random)\n entropy_avail = get_entropy_estimate()\n if entropy_avail >= 2400:\n db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']\n\n async_genkey(current_app.crypto_util,\n db_uri,\n g.filesystem_id,\n g.codename)\n current_app.logger.info(\"generating key, entropy: {}\".format(\n entropy_avail))\n else:\n current_app.logger.warn(\n \"skipping key generation. entropy: {}\".format(\n entropy_avail))\n\n g.source.last_updated = datetime.utcnow()\n db.session.commit()\n\n for sub in new_submissions:\n store.async_add_checksum_for_file(sub)\n\n normalize_timestamps(g.filesystem_id)\n\n return redirect(url_for('main.lookup'))\n\n @view.route('/delete', methods=('POST',))\n @login_required\n def delete():\n \"\"\"This deletes the reply from the source's inbox, but preserves\n the history for journalists such that they can view conversation\n history.\n \"\"\"\n\n query = Reply.query.filter_by(\n filename=request.form['reply_filename'],\n source_id=g.source.id)\n reply = get_one_or_else(query, current_app.logger, abort)\n reply.deleted_by_source = True\n db.session.add(reply)\n db.session.commit()\n\n flash(gettext(\"Reply deleted\"), \"notification\")\n return redirect(url_for('.lookup'))\n\n @view.route('/delete-all', methods=('POST',))\n @login_required\n def batch_delete():\n replies = Reply.query.filter(Reply.source_id == g.source.id) \\\n .filter(Reply.deleted_by_source == False).all() # noqa\n if len(replies) == 0:\n current_app.logger.error(\"Found no replies when at least one was \"\n \"expected\")\n return redirect(url_for('.lookup'))\n\n for reply in replies:\n reply.deleted_by_source = True\n db.session.add(reply)\n db.session.commit()\n\n flash(gettext(\"All replies have been deleted\"), \"notification\")\n return redirect(url_for('.lookup'))\n\n @view.route('/login', methods=('GET', 'POST'))\n def login():\n form = LoginForm()\n if form.validate_on_submit():\n codename = request.form['codename'].strip()\n if valid_codename(codename):\n session.update(codename=codename, logged_in=True)\n return redirect(url_for('.lookup', from_login='1'))\n else:\n current_app.logger.info(\n \"Login failed for invalid codename\")\n flash(gettext(\"Sorry, that is not a recognized codename.\"),\n \"error\")\n return render_template('login.html', form=form)\n\n @view.route('/logout')\n def logout():\n if logged_in():\n msg = render_template('logout_flashed_message.html')\n\n # Clear the session after we render the message so it's localized\n # If a user specified a locale, save it and restore it\n user_locale = g.locale\n session.clear()\n session['locale'] = user_locale\n\n flash(Markup(msg), \"important hide-if-not-tor-browser\")\n return redirect(url_for('.index'))\n\n return view\n", "path": "securedrop/source_app/main.py"}], "after_files": [{"content": "import operator\nimport os\nimport io\n\nfrom base64 import urlsafe_b64encode\nfrom datetime import datetime\nfrom flask import (Blueprint, render_template, flash, redirect, url_for, g,\n session, current_app, request, Markup, abort)\nfrom flask_babel import gettext\nfrom sqlalchemy.exc import IntegrityError\n\nimport store\n\nfrom db import db\nfrom models import Source, Submission, Reply, get_one_or_else\nfrom source_app.decorators import login_required\nfrom source_app.utils import (logged_in, generate_unique_codename,\n async_genkey, normalize_timestamps,\n valid_codename, get_entropy_estimate)\nfrom source_app.forms import LoginForm\n\n\ndef make_blueprint(config):\n view = Blueprint('main', __name__)\n\n @view.route('/')\n def index():\n return render_template('index.html')\n\n @view.route('/generate', methods=('GET', 'POST'))\n def generate():\n if logged_in():\n flash(gettext(\n \"You were redirected because you are already logged in. \"\n \"If you want to create a new account, you should log out \"\n \"first.\"),\n \"notification\")\n return redirect(url_for('.lookup'))\n\n codename = generate_unique_codename(config)\n\n # Generate a unique id for each browser tab and associate the codename with this id.\n # This will allow retrieval of the codename displayed in the tab from which the source has\n # clicked to proceed to /generate (ref. issue #4458)\n tab_id = urlsafe_b64encode(os.urandom(64)).decode()\n codenames = session.get('codenames', {})\n codenames[tab_id] = codename\n session['codenames'] = codenames\n\n session['new_user'] = True\n return render_template('generate.html', codename=codename, tab_id=tab_id)\n\n @view.route('/org-logo')\n def select_logo():\n if os.path.exists(os.path.join(current_app.static_folder, 'i',\n 'custom_logo.png')):\n return redirect(url_for('static', filename='i/custom_logo.png'))\n else:\n return redirect(url_for('static', filename='i/logo.png'))\n\n @view.route('/create', methods=['POST'])\n def create():\n if session.get('logged_in', False):\n flash(gettext(\"You are already logged in. Please verify your codename below as it \" +\n \"may differ from the one displayed on the previous page.\"),\n 'notification')\n else:\n tab_id = request.form['tab_id']\n codename = session['codenames'][tab_id]\n session['codename'] = codename\n\n del session['codenames']\n\n filesystem_id = current_app.crypto_util.hash_codename(codename)\n\n source = Source(filesystem_id, current_app.crypto_util.display_id())\n db.session.add(source)\n try:\n db.session.commit()\n except IntegrityError as e:\n db.session.rollback()\n current_app.logger.error(\n \"Attempt to create a source with duplicate codename: %s\" %\n (e,))\n\n # Issue 2386: don't log in on duplicates\n del session['codename']\n\n # Issue 4361: Delete 'logged_in' if it's in the session\n try:\n del session['logged_in']\n except KeyError:\n pass\n\n abort(500)\n else:\n os.mkdir(current_app.storage.path(filesystem_id))\n\n session['logged_in'] = True\n return redirect(url_for('.lookup'))\n\n @view.route('/lookup', methods=('GET',))\n @login_required\n def lookup():\n replies = []\n source_inbox = Reply.query.filter(Reply.source_id == g.source.id) \\\n .filter(Reply.deleted_by_source == False).all() # noqa\n\n for reply in source_inbox:\n reply_path = current_app.storage.path(\n g.filesystem_id,\n reply.filename,\n )\n try:\n with io.open(reply_path, \"rb\") as f:\n contents = f.read()\n reply_obj = current_app.crypto_util.decrypt(g.codename, contents)\n reply.decrypted = reply_obj\n except UnicodeDecodeError:\n current_app.logger.error(\"Could not decode reply %s\" %\n reply.filename)\n else:\n reply.date = datetime.utcfromtimestamp(\n os.stat(reply_path).st_mtime)\n replies.append(reply)\n\n # Sort the replies by date\n replies.sort(key=operator.attrgetter('date'), reverse=True)\n\n # Generate a keypair to encrypt replies from the journalist\n # Only do this if the journalist has flagged the source as one\n # that they would like to reply to. (Issue #140.)\n if not current_app.crypto_util.get_fingerprint(g.filesystem_id) and \\\n g.source.flagged:\n db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']\n async_genkey(current_app.crypto_util,\n db_uri,\n g.filesystem_id,\n g.codename)\n\n return render_template(\n 'lookup.html',\n allow_document_uploads=current_app.instance_config.allow_document_uploads,\n codename=g.codename,\n replies=replies,\n flagged=g.source.flagged,\n new_user=session.get('new_user', None),\n haskey=current_app.crypto_util.get_fingerprint(\n g.filesystem_id))\n\n @view.route('/submit', methods=('POST',))\n @login_required\n def submit():\n allow_document_uploads = current_app.instance_config.allow_document_uploads\n msg = request.form['msg']\n fh = None\n if allow_document_uploads and 'fh' in request.files:\n fh = request.files['fh']\n\n # Don't submit anything if it was an \"empty\" submission. #878\n if not (msg or fh):\n if allow_document_uploads:\n flash(gettext(\n \"You must enter a message or choose a file to submit.\"),\n \"error\")\n else:\n flash(gettext(\"You must enter a message.\"), \"error\")\n return redirect(url_for('main.lookup'))\n\n fnames = []\n journalist_filename = g.source.journalist_filename\n first_submission = g.source.interaction_count == 0\n\n if msg:\n g.source.interaction_count += 1\n fnames.append(\n current_app.storage.save_message_submission(\n g.filesystem_id,\n g.source.interaction_count,\n journalist_filename,\n msg))\n if fh:\n g.source.interaction_count += 1\n fnames.append(\n current_app.storage.save_file_submission(\n g.filesystem_id,\n g.source.interaction_count,\n journalist_filename,\n fh.filename,\n fh.stream))\n\n if first_submission:\n msg = render_template('first_submission_flashed_message.html')\n flash(Markup(msg), \"success\")\n\n else:\n if msg and not fh:\n html_contents = gettext('Thanks! We received your message.')\n elif not msg and fh:\n html_contents = gettext('Thanks! We received your document.')\n else:\n html_contents = gettext('Thanks! We received your message and '\n 'document.')\n\n msg = render_template('next_submission_flashed_message.html',\n html_contents=html_contents)\n flash(Markup(msg), \"success\")\n\n new_submissions = []\n for fname in fnames:\n submission = Submission(g.source, fname)\n db.session.add(submission)\n new_submissions.append(submission)\n\n if g.source.pending:\n g.source.pending = False\n\n # Generate a keypair now, if there's enough entropy (issue #303)\n # (gpg reads 300 bytes from /dev/random)\n entropy_avail = get_entropy_estimate()\n if entropy_avail >= 2400:\n db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']\n\n async_genkey(current_app.crypto_util,\n db_uri,\n g.filesystem_id,\n g.codename)\n current_app.logger.info(\"generating key, entropy: {}\".format(\n entropy_avail))\n else:\n current_app.logger.warn(\n \"skipping key generation. entropy: {}\".format(\n entropy_avail))\n\n g.source.last_updated = datetime.utcnow()\n db.session.commit()\n\n for sub in new_submissions:\n store.async_add_checksum_for_file(sub)\n\n normalize_timestamps(g.filesystem_id)\n\n return redirect(url_for('main.lookup'))\n\n @view.route('/delete', methods=('POST',))\n @login_required\n def delete():\n \"\"\"This deletes the reply from the source's inbox, but preserves\n the history for journalists such that they can view conversation\n history.\n \"\"\"\n\n query = Reply.query.filter_by(\n filename=request.form['reply_filename'],\n source_id=g.source.id)\n reply = get_one_or_else(query, current_app.logger, abort)\n reply.deleted_by_source = True\n db.session.add(reply)\n db.session.commit()\n\n flash(gettext(\"Reply deleted\"), \"notification\")\n return redirect(url_for('.lookup'))\n\n @view.route('/delete-all', methods=('POST',))\n @login_required\n def batch_delete():\n replies = Reply.query.filter(Reply.source_id == g.source.id) \\\n .filter(Reply.deleted_by_source == False).all() # noqa\n if len(replies) == 0:\n current_app.logger.error(\"Found no replies when at least one was \"\n \"expected\")\n return redirect(url_for('.lookup'))\n\n for reply in replies:\n reply.deleted_by_source = True\n db.session.add(reply)\n db.session.commit()\n\n flash(gettext(\"All replies have been deleted\"), \"notification\")\n return redirect(url_for('.lookup'))\n\n @view.route('/login', methods=('GET', 'POST'))\n def login():\n form = LoginForm()\n if form.validate_on_submit():\n codename = request.form['codename'].strip()\n if valid_codename(codename):\n session.update(codename=codename, logged_in=True)\n return redirect(url_for('.lookup', from_login='1'))\n else:\n current_app.logger.info(\n \"Login failed for invalid codename\")\n flash(gettext(\"Sorry, that is not a recognized codename.\"),\n \"error\")\n return render_template('login.html', form=form)\n\n @view.route('/logout')\n def logout():\n \"\"\"\n If a user is logged in, show them a logout page that prompts them to\n click the New Identity button in Tor Browser to complete their session.\n Otherwise redirect to the main Source Interface page.\n \"\"\"\n if logged_in():\n\n # Clear the session after we render the message so it's localized\n # If a user specified a locale, save it and restore it\n user_locale = g.locale\n session.clear()\n session['locale'] = user_locale\n\n return render_template('logout.html')\n else:\n return redirect(url_for('.index'))\n\n return view\n", "path": "securedrop/source_app/main.py"}]}
3,551
247
gh_patches_debug_56973
rasdani/github-patches
git_diff
bookwyrm-social__bookwyrm-3126
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Saved lists pagination is broken **Describe the bug** Trying to move through pages of saved lists is broken. Instead, one moves back to all lists. **To Reproduce** Steps to reproduce the behavior: 1. Save enough lists to have at least two pages 2. Go to [`Lists -> Saved Lists`](https://bookwyrm.social/list/saved) 3. Click on `Next` 4. Wonder why the lists shown are not the ones you saved 5. Realize you're back on `All Lists` **Expected behavior** One should be able to paginate through saved lists **Instance** [bookwyrm.social](https://bookwyrm.social/) **Additional comments** I'm trying to dig through the code a bit, but I don't have time to setup dev environment and populate local dbs and whatever needs to be done. It might just be that `path` needs to be changed to `/list/saved` [here](https://github.com/bookwyrm-social/bookwyrm/blob/6667178703b93d0d1874f1fd547e97c12a2ec144/bookwyrm/views/list/lists.py#L62)? But idk, this isn't a stack I'm very intimate with 🤷 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bookwyrm/views/list/lists.py` Content: ``` 1 """ book list views""" 2 from django.contrib.auth.decorators import login_required 3 from django.core.paginator import Paginator 4 from django.shortcuts import redirect 5 from django.template.response import TemplateResponse 6 from django.utils.decorators import method_decorator 7 from django.views import View 8 9 from bookwyrm import forms, models 10 from bookwyrm.lists_stream import ListsStream 11 from bookwyrm.views.helpers import get_user_from_username 12 13 14 # pylint: disable=no-self-use 15 class Lists(View): 16 """book list page""" 17 18 def get(self, request): 19 """display a book list""" 20 if request.user.is_authenticated: 21 lists = ListsStream().get_list_stream(request.user) 22 else: 23 lists = models.List.objects.filter(privacy="public") 24 paginated = Paginator(lists, 12) 25 data = { 26 "lists": paginated.get_page(request.GET.get("page")), 27 "list_form": forms.ListForm(), 28 "path": "/list", 29 } 30 return TemplateResponse(request, "lists/lists.html", data) 31 32 @method_decorator(login_required, name="dispatch") 33 # pylint: disable=unused-argument 34 def post(self, request): 35 """create a book_list""" 36 form = forms.ListForm(request.POST) 37 if not form.is_valid(): 38 return redirect("lists") 39 book_list = form.save(request, commit=False) 40 41 # list should not have a group if it is not group curated 42 if not book_list.curation == "group": 43 book_list.group = None 44 book_list.save() 45 46 return redirect(book_list.local_path) 47 48 49 @method_decorator(login_required, name="dispatch") 50 class SavedLists(View): 51 """saved book list page""" 52 53 def get(self, request): 54 """display book lists""" 55 # hide lists with no approved books 56 lists = request.user.saved_lists.order_by("-updated_date") 57 58 paginated = Paginator(lists, 12) 59 data = { 60 "lists": paginated.get_page(request.GET.get("page")), 61 "list_form": forms.ListForm(), 62 "path": "/list", 63 } 64 return TemplateResponse(request, "lists/lists.html", data) 65 66 67 @method_decorator(login_required, name="dispatch") 68 class UserLists(View): 69 """a user's book list page""" 70 71 def get(self, request, username): 72 """display a book list""" 73 user = get_user_from_username(request.user, username) 74 lists = models.List.privacy_filter(request.user).filter(user=user) 75 paginated = Paginator(lists, 12) 76 77 data = { 78 "user": user, 79 "is_self": request.user.id == user.id, 80 "lists": paginated.get_page(request.GET.get("page")), 81 "list_form": forms.ListForm(), 82 "path": user.local_path + "/lists", 83 } 84 return TemplateResponse(request, "user/lists.html", data) 85 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bookwyrm/views/list/lists.py b/bookwyrm/views/list/lists.py --- a/bookwyrm/views/list/lists.py +++ b/bookwyrm/views/list/lists.py @@ -59,7 +59,7 @@ data = { "lists": paginated.get_page(request.GET.get("page")), "list_form": forms.ListForm(), - "path": "/list", + "path": "/list/saved", } return TemplateResponse(request, "lists/lists.html", data)
{"golden_diff": "diff --git a/bookwyrm/views/list/lists.py b/bookwyrm/views/list/lists.py\n--- a/bookwyrm/views/list/lists.py\n+++ b/bookwyrm/views/list/lists.py\n@@ -59,7 +59,7 @@\n data = {\n \"lists\": paginated.get_page(request.GET.get(\"page\")),\n \"list_form\": forms.ListForm(),\n- \"path\": \"/list\",\n+ \"path\": \"/list/saved\",\n }\n return TemplateResponse(request, \"lists/lists.html\", data)\n", "issue": "Saved lists pagination is broken\n**Describe the bug**\r\nTrying to move through pages of saved lists is broken. Instead, one moves back to all lists.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Save enough lists to have at least two pages\r\n2. Go to [`Lists -> Saved Lists`](https://bookwyrm.social/list/saved)\r\n3. Click on `Next`\r\n4. Wonder why the lists shown are not the ones you saved\r\n5. Realize you're back on `All Lists`\r\n\r\n**Expected behavior**\r\nOne should be able to paginate through saved lists\r\n\r\n**Instance**\r\n[bookwyrm.social](https://bookwyrm.social/)\r\n\r\n**Additional comments**\r\nI'm trying to dig through the code a bit, but I don't have time to setup dev environment and populate local dbs and whatever needs to be done. It might just be that `path` needs to be changed to `/list/saved` [here](https://github.com/bookwyrm-social/bookwyrm/blob/6667178703b93d0d1874f1fd547e97c12a2ec144/bookwyrm/views/list/lists.py#L62)? But idk, this isn't a stack I'm very intimate with \ud83e\udd37\r\n\n", "before_files": [{"content": "\"\"\" book list views\"\"\"\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.lists_stream import ListsStream\nfrom bookwyrm.views.helpers import get_user_from_username\n\n\n# pylint: disable=no-self-use\nclass Lists(View):\n \"\"\"book list page\"\"\"\n\n def get(self, request):\n \"\"\"display a book list\"\"\"\n if request.user.is_authenticated:\n lists = ListsStream().get_list_stream(request.user)\n else:\n lists = models.List.objects.filter(privacy=\"public\")\n paginated = Paginator(lists, 12)\n data = {\n \"lists\": paginated.get_page(request.GET.get(\"page\")),\n \"list_form\": forms.ListForm(),\n \"path\": \"/list\",\n }\n return TemplateResponse(request, \"lists/lists.html\", data)\n\n @method_decorator(login_required, name=\"dispatch\")\n # pylint: disable=unused-argument\n def post(self, request):\n \"\"\"create a book_list\"\"\"\n form = forms.ListForm(request.POST)\n if not form.is_valid():\n return redirect(\"lists\")\n book_list = form.save(request, commit=False)\n\n # list should not have a group if it is not group curated\n if not book_list.curation == \"group\":\n book_list.group = None\n book_list.save()\n\n return redirect(book_list.local_path)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass SavedLists(View):\n \"\"\"saved book list page\"\"\"\n\n def get(self, request):\n \"\"\"display book lists\"\"\"\n # hide lists with no approved books\n lists = request.user.saved_lists.order_by(\"-updated_date\")\n\n paginated = Paginator(lists, 12)\n data = {\n \"lists\": paginated.get_page(request.GET.get(\"page\")),\n \"list_form\": forms.ListForm(),\n \"path\": \"/list\",\n }\n return TemplateResponse(request, \"lists/lists.html\", data)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass UserLists(View):\n \"\"\"a user's book list page\"\"\"\n\n def get(self, request, username):\n \"\"\"display a book list\"\"\"\n user = get_user_from_username(request.user, username)\n lists = models.List.privacy_filter(request.user).filter(user=user)\n paginated = Paginator(lists, 12)\n\n data = {\n \"user\": user,\n \"is_self\": request.user.id == user.id,\n \"lists\": paginated.get_page(request.GET.get(\"page\")),\n \"list_form\": forms.ListForm(),\n \"path\": user.local_path + \"/lists\",\n }\n return TemplateResponse(request, \"user/lists.html\", data)\n", "path": "bookwyrm/views/list/lists.py"}], "after_files": [{"content": "\"\"\" book list views\"\"\"\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.lists_stream import ListsStream\nfrom bookwyrm.views.helpers import get_user_from_username\n\n\n# pylint: disable=no-self-use\nclass Lists(View):\n \"\"\"book list page\"\"\"\n\n def get(self, request):\n \"\"\"display a book list\"\"\"\n if request.user.is_authenticated:\n lists = ListsStream().get_list_stream(request.user)\n else:\n lists = models.List.objects.filter(privacy=\"public\")\n paginated = Paginator(lists, 12)\n data = {\n \"lists\": paginated.get_page(request.GET.get(\"page\")),\n \"list_form\": forms.ListForm(),\n \"path\": \"/list\",\n }\n return TemplateResponse(request, \"lists/lists.html\", data)\n\n @method_decorator(login_required, name=\"dispatch\")\n # pylint: disable=unused-argument\n def post(self, request):\n \"\"\"create a book_list\"\"\"\n form = forms.ListForm(request.POST)\n if not form.is_valid():\n return redirect(\"lists\")\n book_list = form.save(request, commit=False)\n\n # list should not have a group if it is not group curated\n if not book_list.curation == \"group\":\n book_list.group = None\n book_list.save()\n\n return redirect(book_list.local_path)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass SavedLists(View):\n \"\"\"saved book list page\"\"\"\n\n def get(self, request):\n \"\"\"display book lists\"\"\"\n # hide lists with no approved books\n lists = request.user.saved_lists.order_by(\"-updated_date\")\n\n paginated = Paginator(lists, 12)\n data = {\n \"lists\": paginated.get_page(request.GET.get(\"page\")),\n \"list_form\": forms.ListForm(),\n \"path\": \"/list/saved\",\n }\n return TemplateResponse(request, \"lists/lists.html\", data)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass UserLists(View):\n \"\"\"a user's book list page\"\"\"\n\n def get(self, request, username):\n \"\"\"display a book list\"\"\"\n user = get_user_from_username(request.user, username)\n lists = models.List.privacy_filter(request.user).filter(user=user)\n paginated = Paginator(lists, 12)\n\n data = {\n \"user\": user,\n \"is_self\": request.user.id == user.id,\n \"lists\": paginated.get_page(request.GET.get(\"page\")),\n \"list_form\": forms.ListForm(),\n \"path\": user.local_path + \"/lists\",\n }\n return TemplateResponse(request, \"user/lists.html\", data)\n", "path": "bookwyrm/views/list/lists.py"}]}
1,304
112
gh_patches_debug_38234
rasdani/github-patches
git_diff
qutebrowser__qutebrowser-5559
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Running asciidoc2html.py from outside folder **Version info**: qutebrowser v1.13.0 Git commit: 6e9f24bc8-dirty on master (2020-07-01 15:34:57 +0200) Backend: QtWebEngine (Chromium 80.0.3987.163) Qt: 5.15.0 **Does the bug happen if you start with `--temp-basedir`?**: **Description** If one runs `scripts/asciidoc2html.py` (or `script/mkvenv.py`) outside of the folder, this fails since the path are not relative to the root of the repo. **How to reproduce** Run `scripts/mkvenv.py` from outside the repo root. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scripts/asciidoc2html.py` Content: ``` 1 #!/usr/bin/env python3 2 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: 3 4 # Copyright 2014-2020 Florian Bruhin (The Compiler) <[email protected]> 5 6 # This file is part of qutebrowser. 7 # 8 # qutebrowser is free software: you can redistribute it and/or modify 9 # it under the terms of the GNU General Public License as published by 10 # the Free Software Foundation, either version 3 of the License, or 11 # (at your option) any later version. 12 # 13 # qutebrowser is distributed in the hope that it will be useful, 14 # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 # GNU General Public License for more details. 17 # 18 # You should have received a copy of the GNU General Public License 19 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. 20 21 """Generate the html documentation based on the asciidoc files.""" 22 23 from typing import List, Optional 24 import re 25 import os 26 import sys 27 import subprocess 28 import shutil 29 import tempfile 30 import argparse 31 import io 32 import pathlib 33 34 sys.path.insert(0, str(pathlib.Path(__file__).resolve().parents[1])) 35 36 from scripts import utils 37 38 DOC_DIR = pathlib.Path("qutebrowser/html/doc") 39 40 41 class AsciiDoc: 42 43 """Abstraction of an asciidoc subprocess.""" 44 45 FILES = ['faq', 'changelog', 'contributing', 'quickstart', 'userscripts'] 46 47 def __init__(self, 48 asciidoc: Optional[List[str]], 49 website: Optional[str]) -> None: 50 self._cmd = None # type: Optional[List[str]] 51 self._asciidoc = asciidoc 52 self._website = website 53 self._homedir = None # type: Optional[pathlib.Path] 54 self._themedir = None # type: Optional[pathlib.Path] 55 self._tempdir = None # type: Optional[pathlib.Path] 56 self._failed = False 57 58 def prepare(self) -> None: 59 """Get the asciidoc command and create the homedir to use.""" 60 self._cmd = self._get_asciidoc_cmd() 61 self._homedir = pathlib.Path(tempfile.mkdtemp()) 62 self._themedir = self._homedir / '.asciidoc' / 'themes' / 'qute' 63 self._tempdir = self._homedir / 'tmp' 64 self._tempdir.mkdir(parents=True) 65 self._themedir.mkdir(parents=True) 66 67 def cleanup(self) -> None: 68 """Clean up the temporary home directory for asciidoc.""" 69 if self._homedir is not None and not self._failed: 70 shutil.rmtree(str(self._homedir)) 71 72 def build(self) -> None: 73 """Build either the website or the docs.""" 74 if self._website: 75 self._build_website() 76 else: 77 self._build_docs() 78 self._copy_images() 79 80 def _build_docs(self) -> None: 81 """Render .asciidoc files to .html sites.""" 82 files = [(pathlib.Path('doc/{}.asciidoc'.format(f)), 83 DOC_DIR / (f + ".html")) for f in self.FILES] 84 for src in pathlib.Path('doc/help/').glob('*.asciidoc'): 85 dst = DOC_DIR / (src.stem + ".html") 86 files.append((src, dst)) 87 88 # patch image links to use local copy 89 replacements = [ 90 ("https://raw.githubusercontent.com/qutebrowser/qutebrowser/master/doc/img/cheatsheet-big.png", 91 "qute://help/img/cheatsheet-big.png"), 92 ("https://raw.githubusercontent.com/qutebrowser/qutebrowser/master/doc/img/cheatsheet-small.png", 93 "qute://help/img/cheatsheet-small.png") 94 ] 95 asciidoc_args = ['-a', 'source-highlighter=pygments'] 96 97 for src, dst in files: 98 assert self._tempdir is not None # for mypy 99 modified_src = self._tempdir / src.name 100 with modified_src.open('w', encoding='utf-8') as moded_f, \ 101 src.open('r', encoding='utf-8') as f: 102 for line in f: 103 for orig, repl in replacements: 104 line = line.replace(orig, repl) 105 moded_f.write(line) 106 self.call(modified_src, dst, *asciidoc_args) 107 108 def _copy_images(self) -> None: 109 """Copy image files to qutebrowser/html/doc.""" 110 print("Copying files...") 111 dst_path = DOC_DIR / 'img' 112 dst_path.mkdir(exist_ok=True) 113 for filename in ['cheatsheet-big.png', 'cheatsheet-small.png']: 114 src = pathlib.Path('doc') / 'img' / filename 115 dst = dst_path / filename 116 shutil.copy(str(src), str(dst)) 117 118 def _build_website_file(self, root: pathlib.Path, filename: str) -> None: 119 """Build a single website file.""" 120 src = root / filename 121 assert self._website is not None # for mypy 122 dst = pathlib.Path(self._website) 123 dst = dst / src.parent.relative_to('.') / (src.stem + ".html") 124 dst.parent.mkdir(exist_ok=True) 125 126 assert self._tempdir is not None # for mypy 127 modified_src = self._tempdir / src.name 128 shutil.copy('www/header.asciidoc', modified_src) 129 130 outfp = io.StringIO() 131 132 header = modified_src.read_text(encoding='utf-8') 133 header += "\n\n" 134 135 with src.open('r', encoding='utf-8') as infp: 136 outfp.write("\n\n") 137 hidden = False 138 found_title = False 139 title = "" 140 last_line = "" 141 142 for line in infp: 143 line = line.rstrip() 144 if line == '// QUTE_WEB_HIDE': 145 assert not hidden 146 hidden = True 147 elif line == '// QUTE_WEB_HIDE_END': 148 assert hidden 149 hidden = False 150 elif line == "The Compiler <[email protected]>": 151 continue 152 elif re.fullmatch(r':\w+:.*', line): 153 # asciidoc field 154 continue 155 156 if not found_title: 157 if re.fullmatch(r'=+', line): 158 line = line.replace('=', '-') 159 found_title = True 160 title = last_line + " | qutebrowser\n" 161 title += "=" * (len(title) - 1) 162 elif re.fullmatch(r'= .+', line): 163 line = '==' + line[1:] 164 found_title = True 165 title = last_line + " | qutebrowser\n" 166 title += "=" * (len(title) - 1) 167 168 if not hidden: 169 outfp.write(line.replace(".asciidoc[", ".html[") + '\n') 170 last_line = line 171 172 current_lines = outfp.getvalue() 173 outfp.close() 174 175 modified_str = title + "\n\n" + header + current_lines 176 modified_src.write_text(modified_str, encoding='utf-8') 177 178 asciidoc_args = ['--theme=qute', '-a toc', '-a toc-placement=manual', 179 '-a', 'source-highlighter=pygments'] 180 self.call(modified_src, dst, *asciidoc_args) 181 182 def _build_website(self) -> None: 183 """Prepare and build the website.""" 184 theme_file = (pathlib.Path('www') / 'qute.css').resolve() 185 assert self._themedir is not None # for mypy 186 shutil.copy(theme_file, self._themedir) 187 188 assert self._website is not None # for mypy 189 outdir = pathlib.Path(self._website) 190 191 for item_path in pathlib.Path().rglob('*.asciidoc'): 192 if item_path.stem in ['header', 'OpenSans-License']: 193 continue 194 self._build_website_file(item_path.parent, item_path.name) 195 196 copy = {'icons': 'icons', 'doc/img': 'doc/img', 'www/media': 'media/'} 197 198 for src, dest in copy.items(): 199 full_dest = outdir / dest 200 try: 201 shutil.rmtree(full_dest) 202 except FileNotFoundError: 203 pass 204 shutil.copytree(src, full_dest) 205 206 for dst, link_name in [ 207 ('README.html', 'index.html'), 208 ((pathlib.Path('doc') / 'quickstart.html'), 209 'quickstart.html')]: 210 assert isinstance(dst, (str, pathlib.Path)) # for mypy 211 try: 212 (outdir / link_name).symlink_to(dst) 213 except FileExistsError: 214 pass 215 216 def _get_asciidoc_cmd(self) -> List[str]: 217 """Try to find out what commandline to use to invoke asciidoc.""" 218 if self._asciidoc is not None: 219 return self._asciidoc 220 221 try: 222 subprocess.run(['asciidoc'], stdout=subprocess.DEVNULL, 223 stderr=subprocess.DEVNULL, check=True) 224 except OSError: 225 pass 226 else: 227 return ['asciidoc'] 228 229 try: 230 subprocess.run(['asciidoc.py'], stdout=subprocess.DEVNULL, 231 stderr=subprocess.DEVNULL, check=True) 232 except OSError: 233 pass 234 else: 235 return ['asciidoc.py'] 236 237 raise FileNotFoundError 238 239 def call(self, src: pathlib.Path, dst: pathlib.Path, *args): 240 """Call asciidoc for the given files. 241 242 Args: 243 src: The source .asciidoc file. 244 dst: The destination .html file, or None to auto-guess. 245 *args: Additional arguments passed to asciidoc. 246 """ 247 print("Calling asciidoc for {}...".format(src.name)) 248 assert self._cmd is not None # for mypy 249 cmdline = self._cmd[:] 250 if dst is not None: 251 cmdline += ['--out-file', str(dst)] 252 cmdline += args 253 cmdline.append(str(src)) 254 255 # So the virtualenv's Pygments is found 256 bin_path = pathlib.Path(sys.executable).parent 257 258 try: 259 env = os.environ.copy() 260 env['HOME'] = str(self._homedir) 261 env['PATH'] = str(bin_path) + os.pathsep + env['PATH'] 262 subprocess.run(cmdline, check=True, env=env) 263 except (subprocess.CalledProcessError, OSError) as e: 264 self._failed = True 265 utils.print_error(str(e)) 266 print("Keeping modified sources in {}.".format(self._homedir), 267 file=sys.stderr) 268 sys.exit(1) 269 270 271 def parse_args() -> argparse.Namespace: 272 """Parse command-line arguments.""" 273 parser = argparse.ArgumentParser() 274 parser.add_argument('--website', help="Build website into a given " 275 "directory.") 276 parser.add_argument('--asciidoc', help="Full path to python and " 277 "asciidoc.py. If not given, it's searched in PATH.", 278 nargs=2, required=False, 279 metavar=('PYTHON', 'ASCIIDOC')) 280 return parser.parse_args() 281 282 283 def run(**kwargs) -> None: 284 """Regenerate documentation.""" 285 DOC_DIR.mkdir(exist_ok=True) 286 287 asciidoc = AsciiDoc(**kwargs) 288 try: 289 asciidoc.prepare() 290 except FileNotFoundError: 291 utils.print_error("Could not find asciidoc! Please install it, or use " 292 "the --asciidoc argument to point this script to " 293 "the correct python/asciidoc.py location!") 294 sys.exit(1) 295 296 try: 297 asciidoc.build() 298 finally: 299 asciidoc.cleanup() 300 301 302 def main(colors: bool = False) -> None: 303 """Generate html files for the online documentation.""" 304 utils.change_cwd() 305 utils.use_color = colors 306 args = parse_args() 307 run(asciidoc=args.asciidoc, website=args.website) 308 309 310 if __name__ == '__main__': 311 main(colors=True) 312 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scripts/asciidoc2html.py b/scripts/asciidoc2html.py --- a/scripts/asciidoc2html.py +++ b/scripts/asciidoc2html.py @@ -35,7 +35,8 @@ from scripts import utils -DOC_DIR = pathlib.Path("qutebrowser/html/doc") +REPO_ROOT = pathlib.Path(__file__).parent.parent +DOC_DIR = REPO_ROOT / 'qutebrowser' / 'html' / 'doc' class AsciiDoc: @@ -79,9 +80,9 @@ def _build_docs(self) -> None: """Render .asciidoc files to .html sites.""" - files = [(pathlib.Path('doc/{}.asciidoc'.format(f)), + files = [(REPO_ROOT / 'doc' / '{}.asciidoc'.format(f)), DOC_DIR / (f + ".html")) for f in self.FILES] - for src in pathlib.Path('doc/help/').glob('*.asciidoc'): + for src in (REPO_ROOT / 'doc' / 'help').glob('*.asciidoc'): dst = DOC_DIR / (src.stem + ".html") files.append((src, dst)) @@ -111,7 +112,7 @@ dst_path = DOC_DIR / 'img' dst_path.mkdir(exist_ok=True) for filename in ['cheatsheet-big.png', 'cheatsheet-small.png']: - src = pathlib.Path('doc') / 'img' / filename + src = REPO_ROOT / 'doc' / 'img' / filename dst = dst_path / filename shutil.copy(str(src), str(dst)) @@ -125,7 +126,7 @@ assert self._tempdir is not None # for mypy modified_src = self._tempdir / src.name - shutil.copy('www/header.asciidoc', modified_src) + shutil.copy(str(REPO_ROOT / 'www' / 'header.asciidoc'), modified_src) outfp = io.StringIO() @@ -181,7 +182,7 @@ def _build_website(self) -> None: """Prepare and build the website.""" - theme_file = (pathlib.Path('www') / 'qute.css').resolve() + theme_file = REPO_ROOT / 'www' / 'qute.css' assert self._themedir is not None # for mypy shutil.copy(theme_file, self._themedir) @@ -205,7 +206,7 @@ for dst, link_name in [ ('README.html', 'index.html'), - ((pathlib.Path('doc') / 'quickstart.html'), + ((REPO_ROOT / 'doc' / 'quickstart.html'), 'quickstart.html')]: assert isinstance(dst, (str, pathlib.Path)) # for mypy try:
{"golden_diff": "diff --git a/scripts/asciidoc2html.py b/scripts/asciidoc2html.py\n--- a/scripts/asciidoc2html.py\n+++ b/scripts/asciidoc2html.py\n@@ -35,7 +35,8 @@\n \n from scripts import utils\n \n-DOC_DIR = pathlib.Path(\"qutebrowser/html/doc\")\n+REPO_ROOT = pathlib.Path(__file__).parent.parent\n+DOC_DIR = REPO_ROOT / 'qutebrowser' / 'html' / 'doc'\n \n \n class AsciiDoc:\n@@ -79,9 +80,9 @@\n \n def _build_docs(self) -> None:\n \"\"\"Render .asciidoc files to .html sites.\"\"\"\n- files = [(pathlib.Path('doc/{}.asciidoc'.format(f)),\n+ files = [(REPO_ROOT / 'doc' / '{}.asciidoc'.format(f)),\n DOC_DIR / (f + \".html\")) for f in self.FILES]\n- for src in pathlib.Path('doc/help/').glob('*.asciidoc'):\n+ for src in (REPO_ROOT / 'doc' / 'help').glob('*.asciidoc'):\n dst = DOC_DIR / (src.stem + \".html\")\n files.append((src, dst))\n \n@@ -111,7 +112,7 @@\n dst_path = DOC_DIR / 'img'\n dst_path.mkdir(exist_ok=True)\n for filename in ['cheatsheet-big.png', 'cheatsheet-small.png']:\n- src = pathlib.Path('doc') / 'img' / filename\n+ src = REPO_ROOT / 'doc' / 'img' / filename\n dst = dst_path / filename\n shutil.copy(str(src), str(dst))\n \n@@ -125,7 +126,7 @@\n \n assert self._tempdir is not None # for mypy\n modified_src = self._tempdir / src.name\n- shutil.copy('www/header.asciidoc', modified_src)\n+ shutil.copy(str(REPO_ROOT / 'www' / 'header.asciidoc'), modified_src)\n \n outfp = io.StringIO()\n \n@@ -181,7 +182,7 @@\n \n def _build_website(self) -> None:\n \"\"\"Prepare and build the website.\"\"\"\n- theme_file = (pathlib.Path('www') / 'qute.css').resolve()\n+ theme_file = REPO_ROOT / 'www' / 'qute.css'\n assert self._themedir is not None # for mypy\n shutil.copy(theme_file, self._themedir)\n \n@@ -205,7 +206,7 @@\n \n for dst, link_name in [\n ('README.html', 'index.html'),\n- ((pathlib.Path('doc') / 'quickstart.html'),\n+ ((REPO_ROOT / 'doc' / 'quickstart.html'),\n 'quickstart.html')]:\n assert isinstance(dst, (str, pathlib.Path)) # for mypy\n try:\n", "issue": "Running asciidoc2html.py from outside folder\n**Version info**:\r\nqutebrowser v1.13.0 \r\nGit commit: 6e9f24bc8-dirty on master (2020-07-01 15:34:57 +0200)\r\nBackend: QtWebEngine (Chromium 80.0.3987.163)\r\nQt: 5.15.0 \r\n\r\n**Does the bug happen if you start with `--temp-basedir`?**:\r\n\r\n**Description**\r\nIf one runs `scripts/asciidoc2html.py` (or `script/mkvenv.py`) outside of the folder, this fails since the path are not relative to the root of the repo.\r\n\r\n**How to reproduce**\r\nRun `scripts/mkvenv.py` from outside the repo root.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2020 Florian Bruhin (The Compiler) <[email protected]>\n\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Generate the html documentation based on the asciidoc files.\"\"\"\n\nfrom typing import List, Optional\nimport re\nimport os\nimport sys\nimport subprocess\nimport shutil\nimport tempfile\nimport argparse\nimport io\nimport pathlib\n\nsys.path.insert(0, str(pathlib.Path(__file__).resolve().parents[1]))\n\nfrom scripts import utils\n\nDOC_DIR = pathlib.Path(\"qutebrowser/html/doc\")\n\n\nclass AsciiDoc:\n\n \"\"\"Abstraction of an asciidoc subprocess.\"\"\"\n\n FILES = ['faq', 'changelog', 'contributing', 'quickstart', 'userscripts']\n\n def __init__(self,\n asciidoc: Optional[List[str]],\n website: Optional[str]) -> None:\n self._cmd = None # type: Optional[List[str]]\n self._asciidoc = asciidoc\n self._website = website\n self._homedir = None # type: Optional[pathlib.Path]\n self._themedir = None # type: Optional[pathlib.Path]\n self._tempdir = None # type: Optional[pathlib.Path]\n self._failed = False\n\n def prepare(self) -> None:\n \"\"\"Get the asciidoc command and create the homedir to use.\"\"\"\n self._cmd = self._get_asciidoc_cmd()\n self._homedir = pathlib.Path(tempfile.mkdtemp())\n self._themedir = self._homedir / '.asciidoc' / 'themes' / 'qute'\n self._tempdir = self._homedir / 'tmp'\n self._tempdir.mkdir(parents=True)\n self._themedir.mkdir(parents=True)\n\n def cleanup(self) -> None:\n \"\"\"Clean up the temporary home directory for asciidoc.\"\"\"\n if self._homedir is not None and not self._failed:\n shutil.rmtree(str(self._homedir))\n\n def build(self) -> None:\n \"\"\"Build either the website or the docs.\"\"\"\n if self._website:\n self._build_website()\n else:\n self._build_docs()\n self._copy_images()\n\n def _build_docs(self) -> None:\n \"\"\"Render .asciidoc files to .html sites.\"\"\"\n files = [(pathlib.Path('doc/{}.asciidoc'.format(f)),\n DOC_DIR / (f + \".html\")) for f in self.FILES]\n for src in pathlib.Path('doc/help/').glob('*.asciidoc'):\n dst = DOC_DIR / (src.stem + \".html\")\n files.append((src, dst))\n\n # patch image links to use local copy\n replacements = [\n (\"https://raw.githubusercontent.com/qutebrowser/qutebrowser/master/doc/img/cheatsheet-big.png\",\n \"qute://help/img/cheatsheet-big.png\"),\n (\"https://raw.githubusercontent.com/qutebrowser/qutebrowser/master/doc/img/cheatsheet-small.png\",\n \"qute://help/img/cheatsheet-small.png\")\n ]\n asciidoc_args = ['-a', 'source-highlighter=pygments']\n\n for src, dst in files:\n assert self._tempdir is not None # for mypy\n modified_src = self._tempdir / src.name\n with modified_src.open('w', encoding='utf-8') as moded_f, \\\n src.open('r', encoding='utf-8') as f:\n for line in f:\n for orig, repl in replacements:\n line = line.replace(orig, repl)\n moded_f.write(line)\n self.call(modified_src, dst, *asciidoc_args)\n\n def _copy_images(self) -> None:\n \"\"\"Copy image files to qutebrowser/html/doc.\"\"\"\n print(\"Copying files...\")\n dst_path = DOC_DIR / 'img'\n dst_path.mkdir(exist_ok=True)\n for filename in ['cheatsheet-big.png', 'cheatsheet-small.png']:\n src = pathlib.Path('doc') / 'img' / filename\n dst = dst_path / filename\n shutil.copy(str(src), str(dst))\n\n def _build_website_file(self, root: pathlib.Path, filename: str) -> None:\n \"\"\"Build a single website file.\"\"\"\n src = root / filename\n assert self._website is not None # for mypy\n dst = pathlib.Path(self._website)\n dst = dst / src.parent.relative_to('.') / (src.stem + \".html\")\n dst.parent.mkdir(exist_ok=True)\n\n assert self._tempdir is not None # for mypy\n modified_src = self._tempdir / src.name\n shutil.copy('www/header.asciidoc', modified_src)\n\n outfp = io.StringIO()\n\n header = modified_src.read_text(encoding='utf-8')\n header += \"\\n\\n\"\n\n with src.open('r', encoding='utf-8') as infp:\n outfp.write(\"\\n\\n\")\n hidden = False\n found_title = False\n title = \"\"\n last_line = \"\"\n\n for line in infp:\n line = line.rstrip()\n if line == '// QUTE_WEB_HIDE':\n assert not hidden\n hidden = True\n elif line == '// QUTE_WEB_HIDE_END':\n assert hidden\n hidden = False\n elif line == \"The Compiler <[email protected]>\":\n continue\n elif re.fullmatch(r':\\w+:.*', line):\n # asciidoc field\n continue\n\n if not found_title:\n if re.fullmatch(r'=+', line):\n line = line.replace('=', '-')\n found_title = True\n title = last_line + \" | qutebrowser\\n\"\n title += \"=\" * (len(title) - 1)\n elif re.fullmatch(r'= .+', line):\n line = '==' + line[1:]\n found_title = True\n title = last_line + \" | qutebrowser\\n\"\n title += \"=\" * (len(title) - 1)\n\n if not hidden:\n outfp.write(line.replace(\".asciidoc[\", \".html[\") + '\\n')\n last_line = line\n\n current_lines = outfp.getvalue()\n outfp.close()\n\n modified_str = title + \"\\n\\n\" + header + current_lines\n modified_src.write_text(modified_str, encoding='utf-8')\n\n asciidoc_args = ['--theme=qute', '-a toc', '-a toc-placement=manual',\n '-a', 'source-highlighter=pygments']\n self.call(modified_src, dst, *asciidoc_args)\n\n def _build_website(self) -> None:\n \"\"\"Prepare and build the website.\"\"\"\n theme_file = (pathlib.Path('www') / 'qute.css').resolve()\n assert self._themedir is not None # for mypy\n shutil.copy(theme_file, self._themedir)\n\n assert self._website is not None # for mypy\n outdir = pathlib.Path(self._website)\n\n for item_path in pathlib.Path().rglob('*.asciidoc'):\n if item_path.stem in ['header', 'OpenSans-License']:\n continue\n self._build_website_file(item_path.parent, item_path.name)\n\n copy = {'icons': 'icons', 'doc/img': 'doc/img', 'www/media': 'media/'}\n\n for src, dest in copy.items():\n full_dest = outdir / dest\n try:\n shutil.rmtree(full_dest)\n except FileNotFoundError:\n pass\n shutil.copytree(src, full_dest)\n\n for dst, link_name in [\n ('README.html', 'index.html'),\n ((pathlib.Path('doc') / 'quickstart.html'),\n 'quickstart.html')]:\n assert isinstance(dst, (str, pathlib.Path)) # for mypy\n try:\n (outdir / link_name).symlink_to(dst)\n except FileExistsError:\n pass\n\n def _get_asciidoc_cmd(self) -> List[str]:\n \"\"\"Try to find out what commandline to use to invoke asciidoc.\"\"\"\n if self._asciidoc is not None:\n return self._asciidoc\n\n try:\n subprocess.run(['asciidoc'], stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL, check=True)\n except OSError:\n pass\n else:\n return ['asciidoc']\n\n try:\n subprocess.run(['asciidoc.py'], stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL, check=True)\n except OSError:\n pass\n else:\n return ['asciidoc.py']\n\n raise FileNotFoundError\n\n def call(self, src: pathlib.Path, dst: pathlib.Path, *args):\n \"\"\"Call asciidoc for the given files.\n\n Args:\n src: The source .asciidoc file.\n dst: The destination .html file, or None to auto-guess.\n *args: Additional arguments passed to asciidoc.\n \"\"\"\n print(\"Calling asciidoc for {}...\".format(src.name))\n assert self._cmd is not None # for mypy\n cmdline = self._cmd[:]\n if dst is not None:\n cmdline += ['--out-file', str(dst)]\n cmdline += args\n cmdline.append(str(src))\n\n # So the virtualenv's Pygments is found\n bin_path = pathlib.Path(sys.executable).parent\n\n try:\n env = os.environ.copy()\n env['HOME'] = str(self._homedir)\n env['PATH'] = str(bin_path) + os.pathsep + env['PATH']\n subprocess.run(cmdline, check=True, env=env)\n except (subprocess.CalledProcessError, OSError) as e:\n self._failed = True\n utils.print_error(str(e))\n print(\"Keeping modified sources in {}.\".format(self._homedir),\n file=sys.stderr)\n sys.exit(1)\n\n\ndef parse_args() -> argparse.Namespace:\n \"\"\"Parse command-line arguments.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--website', help=\"Build website into a given \"\n \"directory.\")\n parser.add_argument('--asciidoc', help=\"Full path to python and \"\n \"asciidoc.py. If not given, it's searched in PATH.\",\n nargs=2, required=False,\n metavar=('PYTHON', 'ASCIIDOC'))\n return parser.parse_args()\n\n\ndef run(**kwargs) -> None:\n \"\"\"Regenerate documentation.\"\"\"\n DOC_DIR.mkdir(exist_ok=True)\n\n asciidoc = AsciiDoc(**kwargs)\n try:\n asciidoc.prepare()\n except FileNotFoundError:\n utils.print_error(\"Could not find asciidoc! Please install it, or use \"\n \"the --asciidoc argument to point this script to \"\n \"the correct python/asciidoc.py location!\")\n sys.exit(1)\n\n try:\n asciidoc.build()\n finally:\n asciidoc.cleanup()\n\n\ndef main(colors: bool = False) -> None:\n \"\"\"Generate html files for the online documentation.\"\"\"\n utils.change_cwd()\n utils.use_color = colors\n args = parse_args()\n run(asciidoc=args.asciidoc, website=args.website)\n\n\nif __name__ == '__main__':\n main(colors=True)\n", "path": "scripts/asciidoc2html.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2020 Florian Bruhin (The Compiler) <[email protected]>\n\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Generate the html documentation based on the asciidoc files.\"\"\"\n\nfrom typing import List, Optional\nimport re\nimport os\nimport sys\nimport subprocess\nimport shutil\nimport tempfile\nimport argparse\nimport io\nimport pathlib\n\nsys.path.insert(0, str(pathlib.Path(__file__).resolve().parents[1]))\n\nfrom scripts import utils\n\nREPO_ROOT = pathlib.Path(__file__).parent.parent\nDOC_DIR = REPO_ROOT / 'qutebrowser' / 'html' / 'doc'\n\n\nclass AsciiDoc:\n\n \"\"\"Abstraction of an asciidoc subprocess.\"\"\"\n\n FILES = ['faq', 'changelog', 'contributing', 'quickstart', 'userscripts']\n\n def __init__(self,\n asciidoc: Optional[List[str]],\n website: Optional[str]) -> None:\n self._cmd = None # type: Optional[List[str]]\n self._asciidoc = asciidoc\n self._website = website\n self._homedir = None # type: Optional[pathlib.Path]\n self._themedir = None # type: Optional[pathlib.Path]\n self._tempdir = None # type: Optional[pathlib.Path]\n self._failed = False\n\n def prepare(self) -> None:\n \"\"\"Get the asciidoc command and create the homedir to use.\"\"\"\n self._cmd = self._get_asciidoc_cmd()\n self._homedir = pathlib.Path(tempfile.mkdtemp())\n self._themedir = self._homedir / '.asciidoc' / 'themes' / 'qute'\n self._tempdir = self._homedir / 'tmp'\n self._tempdir.mkdir(parents=True)\n self._themedir.mkdir(parents=True)\n\n def cleanup(self) -> None:\n \"\"\"Clean up the temporary home directory for asciidoc.\"\"\"\n if self._homedir is not None and not self._failed:\n shutil.rmtree(str(self._homedir))\n\n def build(self) -> None:\n \"\"\"Build either the website or the docs.\"\"\"\n if self._website:\n self._build_website()\n else:\n self._build_docs()\n self._copy_images()\n\n def _build_docs(self) -> None:\n \"\"\"Render .asciidoc files to .html sites.\"\"\"\n files = [(REPO_ROOT / 'doc' / '{}.asciidoc'.format(f)),\n DOC_DIR / (f + \".html\")) for f in self.FILES]\n for src in (REPO_ROOT / 'doc' / 'help').glob('*.asciidoc'):\n dst = DOC_DIR / (src.stem + \".html\")\n files.append((src, dst))\n\n # patch image links to use local copy\n replacements = [\n (\"https://raw.githubusercontent.com/qutebrowser/qutebrowser/master/doc/img/cheatsheet-big.png\",\n \"qute://help/img/cheatsheet-big.png\"),\n (\"https://raw.githubusercontent.com/qutebrowser/qutebrowser/master/doc/img/cheatsheet-small.png\",\n \"qute://help/img/cheatsheet-small.png\")\n ]\n asciidoc_args = ['-a', 'source-highlighter=pygments']\n\n for src, dst in files:\n assert self._tempdir is not None # for mypy\n modified_src = self._tempdir / src.name\n with modified_src.open('w', encoding='utf-8') as moded_f, \\\n src.open('r', encoding='utf-8') as f:\n for line in f:\n for orig, repl in replacements:\n line = line.replace(orig, repl)\n moded_f.write(line)\n self.call(modified_src, dst, *asciidoc_args)\n\n def _copy_images(self) -> None:\n \"\"\"Copy image files to qutebrowser/html/doc.\"\"\"\n print(\"Copying files...\")\n dst_path = DOC_DIR / 'img'\n dst_path.mkdir(exist_ok=True)\n for filename in ['cheatsheet-big.png', 'cheatsheet-small.png']:\n src = REPO_ROOT / 'doc' / 'img' / filename\n dst = dst_path / filename\n shutil.copy(str(src), str(dst))\n\n def _build_website_file(self, root: pathlib.Path, filename: str) -> None:\n \"\"\"Build a single website file.\"\"\"\n src = root / filename\n assert self._website is not None # for mypy\n dst = pathlib.Path(self._website)\n dst = dst / src.parent.relative_to('.') / (src.stem + \".html\")\n dst.parent.mkdir(exist_ok=True)\n\n assert self._tempdir is not None # for mypy\n modified_src = self._tempdir / src.name\n shutil.copy(str(REPO_ROOT / 'www' / 'header.asciidoc'), modified_src)\n\n outfp = io.StringIO()\n\n header = modified_src.read_text(encoding='utf-8')\n header += \"\\n\\n\"\n\n with src.open('r', encoding='utf-8') as infp:\n outfp.write(\"\\n\\n\")\n hidden = False\n found_title = False\n title = \"\"\n last_line = \"\"\n\n for line in infp:\n line = line.rstrip()\n if line == '// QUTE_WEB_HIDE':\n assert not hidden\n hidden = True\n elif line == '// QUTE_WEB_HIDE_END':\n assert hidden\n hidden = False\n elif line == \"The Compiler <[email protected]>\":\n continue\n elif re.fullmatch(r':\\w+:.*', line):\n # asciidoc field\n continue\n\n if not found_title:\n if re.fullmatch(r'=+', line):\n line = line.replace('=', '-')\n found_title = True\n title = last_line + \" | qutebrowser\\n\"\n title += \"=\" * (len(title) - 1)\n elif re.fullmatch(r'= .+', line):\n line = '==' + line[1:]\n found_title = True\n title = last_line + \" | qutebrowser\\n\"\n title += \"=\" * (len(title) - 1)\n\n if not hidden:\n outfp.write(line.replace(\".asciidoc[\", \".html[\") + '\\n')\n last_line = line\n\n current_lines = outfp.getvalue()\n outfp.close()\n\n modified_str = title + \"\\n\\n\" + header + current_lines\n modified_src.write_text(modified_str, encoding='utf-8')\n\n asciidoc_args = ['--theme=qute', '-a toc', '-a toc-placement=manual',\n '-a', 'source-highlighter=pygments']\n self.call(modified_src, dst, *asciidoc_args)\n\n def _build_website(self) -> None:\n \"\"\"Prepare and build the website.\"\"\"\n theme_file = REPO_ROOT / 'www' / 'qute.css'\n assert self._themedir is not None # for mypy\n shutil.copy(theme_file, self._themedir)\n\n assert self._website is not None # for mypy\n outdir = pathlib.Path(self._website)\n\n for item_path in pathlib.Path().rglob('*.asciidoc'):\n if item_path.stem in ['header', 'OpenSans-License']:\n continue\n self._build_website_file(item_path.parent, item_path.name)\n\n copy = {'icons': 'icons', 'doc/img': 'doc/img', 'www/media': 'media/'}\n\n for src, dest in copy.items():\n full_dest = outdir / dest\n try:\n shutil.rmtree(full_dest)\n except FileNotFoundError:\n pass\n shutil.copytree(src, full_dest)\n\n for dst, link_name in [\n ('README.html', 'index.html'),\n ((REPO_ROOT / 'doc' / 'quickstart.html'),\n 'quickstart.html')]:\n assert isinstance(dst, (str, pathlib.Path)) # for mypy\n try:\n (outdir / link_name).symlink_to(dst)\n except FileExistsError:\n pass\n\n def _get_asciidoc_cmd(self) -> List[str]:\n \"\"\"Try to find out what commandline to use to invoke asciidoc.\"\"\"\n if self._asciidoc is not None:\n return self._asciidoc\n\n try:\n subprocess.run(['asciidoc'], stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL, check=True)\n except OSError:\n pass\n else:\n return ['asciidoc']\n\n try:\n subprocess.run(['asciidoc.py'], stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL, check=True)\n except OSError:\n pass\n else:\n return ['asciidoc.py']\n\n raise FileNotFoundError\n\n def call(self, src: pathlib.Path, dst: pathlib.Path, *args):\n \"\"\"Call asciidoc for the given files.\n\n Args:\n src: The source .asciidoc file.\n dst: The destination .html file, or None to auto-guess.\n *args: Additional arguments passed to asciidoc.\n \"\"\"\n print(\"Calling asciidoc for {}...\".format(src.name))\n assert self._cmd is not None # for mypy\n cmdline = self._cmd[:]\n if dst is not None:\n cmdline += ['--out-file', str(dst)]\n cmdline += args\n cmdline.append(str(src))\n\n # So the virtualenv's Pygments is found\n bin_path = pathlib.Path(sys.executable).parent\n\n try:\n env = os.environ.copy()\n env['HOME'] = str(self._homedir)\n env['PATH'] = str(bin_path) + os.pathsep + env['PATH']\n subprocess.run(cmdline, check=True, env=env)\n except (subprocess.CalledProcessError, OSError) as e:\n self._failed = True\n utils.print_error(str(e))\n print(\"Keeping modified sources in {}.\".format(self._homedir),\n file=sys.stderr)\n sys.exit(1)\n\n\ndef parse_args() -> argparse.Namespace:\n \"\"\"Parse command-line arguments.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--website', help=\"Build website into a given \"\n \"directory.\")\n parser.add_argument('--asciidoc', help=\"Full path to python and \"\n \"asciidoc.py. If not given, it's searched in PATH.\",\n nargs=2, required=False,\n metavar=('PYTHON', 'ASCIIDOC'))\n return parser.parse_args()\n\n\ndef run(**kwargs) -> None:\n \"\"\"Regenerate documentation.\"\"\"\n DOC_DIR.mkdir(exist_ok=True)\n\n asciidoc = AsciiDoc(**kwargs)\n try:\n asciidoc.prepare()\n except FileNotFoundError:\n utils.print_error(\"Could not find asciidoc! Please install it, or use \"\n \"the --asciidoc argument to point this script to \"\n \"the correct python/asciidoc.py location!\")\n sys.exit(1)\n\n try:\n asciidoc.build()\n finally:\n asciidoc.cleanup()\n\n\ndef main(colors: bool = False) -> None:\n \"\"\"Generate html files for the online documentation.\"\"\"\n utils.change_cwd()\n utils.use_color = colors\n args = parse_args()\n run(asciidoc=args.asciidoc, website=args.website)\n\n\nif __name__ == '__main__':\n main(colors=True)\n", "path": "scripts/asciidoc2html.py"}]}
3,929
654
gh_patches_debug_1469
rasdani/github-patches
git_diff
microsoft__DeepSpeed-5577
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] fp_quantizer is not correctly built when non-jit installation **Describe the bug** fp_quantizer is not correctly built when non-jit installation. **To Reproduce** Steps to reproduce the behavior: ``` DS_BUILD_FP_QUANTIZER=1 pip install deepspeed ``` install will succeed but ``` from deepspeed.ops.fp_quantizer import FP_Quantize FP_Quantize() ``` will raise `ImportError: dynamic module does not define module export function (PyInit_fp_quantizer_op)` **Expected behavior** Renaming csrc/fp_quantizer/quantize.cu may solve the issue. This restriction seems to be cause of the bug. > Note that setuptools cannot handle files with the same name but different extensions https://pytorch.org/tutorials/advanced/cpp_extension.html --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `op_builder/fp_quantizer.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. 2 # SPDX-License-Identifier: Apache-2.0 3 4 # DeepSpeed Team 5 6 from .builder import CUDAOpBuilder, installed_cuda_version 7 8 9 class FPQuantizerBuilder(CUDAOpBuilder): 10 BUILD_VAR = "DS_BUILD_FP_QUANTIZER" 11 NAME = "fp_quantizer" 12 13 def __init__(self, name=None): 14 name = self.NAME if name is None else name 15 super().__init__(name=name) 16 17 def absolute_name(self): 18 return f'deepspeed.ops.fp_quantizer.{self.NAME}_op' 19 20 def is_compatible(self, verbose=True): 21 try: 22 import torch 23 except ImportError: 24 self.warning("Please install torch if trying to pre-compile inference kernels") 25 return False 26 27 cuda_okay = True 28 if not self.is_rocm_pytorch() and torch.cuda.is_available(): #ignore-cuda 29 sys_cuda_major, _ = installed_cuda_version() 30 torch_cuda_major = int(torch.version.cuda.split('.')[0]) 31 cuda_capability = torch.cuda.get_device_properties(0).major #ignore-cuda 32 if cuda_capability < 8: 33 self.warning("NVIDIA Inference is only supported on Ampere and newer architectures") 34 cuda_okay = False 35 if cuda_capability >= 8: 36 if torch_cuda_major < 11 or sys_cuda_major < 11: 37 self.warning("On Ampere and higher architectures please use CUDA 11+") 38 cuda_okay = False 39 return super().is_compatible(verbose) and cuda_okay 40 41 def filter_ccs(self, ccs): 42 ccs_retained = [] 43 ccs_pruned = [] 44 for cc in ccs: 45 if int(cc[0]) >= 8: 46 ccs_retained.append(cc) 47 else: 48 ccs_pruned.append(cc) 49 if len(ccs_pruned) > 0: 50 self.warning(f"Filtered compute capabilities {ccs_pruned}") 51 return ccs_retained 52 53 def sources(self): 54 return [ 55 "csrc/fp_quantizer/quantize.cu", 56 "csrc/fp_quantizer/quantize.cpp", 57 ] 58 59 def extra_ldflags(self): 60 return ['-lcurand'] 61 62 def include_paths(self): 63 return ['csrc/fp_quantizer/includes', 'csrc/includes'] 64 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/op_builder/fp_quantizer.py b/op_builder/fp_quantizer.py --- a/op_builder/fp_quantizer.py +++ b/op_builder/fp_quantizer.py @@ -52,8 +52,8 @@ def sources(self): return [ - "csrc/fp_quantizer/quantize.cu", - "csrc/fp_quantizer/quantize.cpp", + "csrc/fp_quantizer/fp_quantize.cu", + "csrc/fp_quantizer/fp_quantize.cpp", ] def extra_ldflags(self):
{"golden_diff": "diff --git a/op_builder/fp_quantizer.py b/op_builder/fp_quantizer.py\n--- a/op_builder/fp_quantizer.py\n+++ b/op_builder/fp_quantizer.py\n@@ -52,8 +52,8 @@\n \n def sources(self):\n return [\n- \"csrc/fp_quantizer/quantize.cu\",\n- \"csrc/fp_quantizer/quantize.cpp\",\n+ \"csrc/fp_quantizer/fp_quantize.cu\",\n+ \"csrc/fp_quantizer/fp_quantize.cpp\",\n ]\n \n def extra_ldflags(self):\n", "issue": "[BUG] fp_quantizer is not correctly built when non-jit installation\n**Describe the bug**\r\nfp_quantizer is not correctly built when non-jit installation.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n```\r\nDS_BUILD_FP_QUANTIZER=1 pip install deepspeed\r\n```\r\ninstall will succeed but\r\n```\r\nfrom deepspeed.ops.fp_quantizer import FP_Quantize\r\nFP_Quantize()\r\n```\r\nwill raise `ImportError: dynamic module does not define module export function (PyInit_fp_quantizer_op)`\r\n\r\n**Expected behavior**\r\n\r\nRenaming csrc/fp_quantizer/quantize.cu may solve the issue.\r\nThis restriction seems to be cause of the bug.\r\n> Note that setuptools cannot handle files with the same name but different extensions\r\nhttps://pytorch.org/tutorials/advanced/cpp_extension.html\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nfrom .builder import CUDAOpBuilder, installed_cuda_version\n\n\nclass FPQuantizerBuilder(CUDAOpBuilder):\n BUILD_VAR = \"DS_BUILD_FP_QUANTIZER\"\n NAME = \"fp_quantizer\"\n\n def __init__(self, name=None):\n name = self.NAME if name is None else name\n super().__init__(name=name)\n\n def absolute_name(self):\n return f'deepspeed.ops.fp_quantizer.{self.NAME}_op'\n\n def is_compatible(self, verbose=True):\n try:\n import torch\n except ImportError:\n self.warning(\"Please install torch if trying to pre-compile inference kernels\")\n return False\n\n cuda_okay = True\n if not self.is_rocm_pytorch() and torch.cuda.is_available(): #ignore-cuda\n sys_cuda_major, _ = installed_cuda_version()\n torch_cuda_major = int(torch.version.cuda.split('.')[0])\n cuda_capability = torch.cuda.get_device_properties(0).major #ignore-cuda\n if cuda_capability < 8:\n self.warning(\"NVIDIA Inference is only supported on Ampere and newer architectures\")\n cuda_okay = False\n if cuda_capability >= 8:\n if torch_cuda_major < 11 or sys_cuda_major < 11:\n self.warning(\"On Ampere and higher architectures please use CUDA 11+\")\n cuda_okay = False\n return super().is_compatible(verbose) and cuda_okay\n\n def filter_ccs(self, ccs):\n ccs_retained = []\n ccs_pruned = []\n for cc in ccs:\n if int(cc[0]) >= 8:\n ccs_retained.append(cc)\n else:\n ccs_pruned.append(cc)\n if len(ccs_pruned) > 0:\n self.warning(f\"Filtered compute capabilities {ccs_pruned}\")\n return ccs_retained\n\n def sources(self):\n return [\n \"csrc/fp_quantizer/quantize.cu\",\n \"csrc/fp_quantizer/quantize.cpp\",\n ]\n\n def extra_ldflags(self):\n return ['-lcurand']\n\n def include_paths(self):\n return ['csrc/fp_quantizer/includes', 'csrc/includes']\n", "path": "op_builder/fp_quantizer.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nfrom .builder import CUDAOpBuilder, installed_cuda_version\n\n\nclass FPQuantizerBuilder(CUDAOpBuilder):\n BUILD_VAR = \"DS_BUILD_FP_QUANTIZER\"\n NAME = \"fp_quantizer\"\n\n def __init__(self, name=None):\n name = self.NAME if name is None else name\n super().__init__(name=name)\n\n def absolute_name(self):\n return f'deepspeed.ops.fp_quantizer.{self.NAME}_op'\n\n def is_compatible(self, verbose=True):\n try:\n import torch\n except ImportError:\n self.warning(\"Please install torch if trying to pre-compile inference kernels\")\n return False\n\n cuda_okay = True\n if not self.is_rocm_pytorch() and torch.cuda.is_available(): #ignore-cuda\n sys_cuda_major, _ = installed_cuda_version()\n torch_cuda_major = int(torch.version.cuda.split('.')[0])\n cuda_capability = torch.cuda.get_device_properties(0).major #ignore-cuda\n if cuda_capability < 8:\n self.warning(\"NVIDIA Inference is only supported on Ampere and newer architectures\")\n cuda_okay = False\n if cuda_capability >= 8:\n if torch_cuda_major < 11 or sys_cuda_major < 11:\n self.warning(\"On Ampere and higher architectures please use CUDA 11+\")\n cuda_okay = False\n return super().is_compatible(verbose) and cuda_okay\n\n def filter_ccs(self, ccs):\n ccs_retained = []\n ccs_pruned = []\n for cc in ccs:\n if int(cc[0]) >= 8:\n ccs_retained.append(cc)\n else:\n ccs_pruned.append(cc)\n if len(ccs_pruned) > 0:\n self.warning(f\"Filtered compute capabilities {ccs_pruned}\")\n return ccs_retained\n\n def sources(self):\n return [\n \"csrc/fp_quantizer/fp_quantize.cu\",\n \"csrc/fp_quantizer/fp_quantize.cpp\",\n ]\n\n def extra_ldflags(self):\n return ['-lcurand']\n\n def include_paths(self):\n return ['csrc/fp_quantizer/includes', 'csrc/includes']\n", "path": "op_builder/fp_quantizer.py"}]}
1,063
131
gh_patches_debug_16543
rasdani/github-patches
git_diff
web2py__web2py-1496
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- IS_EMPTY_OR validator returns incorrect "empty" value When setting default validators, the https://github.com/web2py/web2py/commit/bdbc053285b67fd3ee02f2ea862b30ca495f33e2 commit mistakenly sets the `null` attribute of the `IS_EMPTY_OR` validator to `''` for _all_ field types rather than just the string based field types. [This line](https://github.com/web2py/web2py/blob/1877f497309e71918aa78e1a1288cbe3cb5392ec/gluon/dal.py#L81): ``` requires[0] = validators.IS_EMPTY_OR(requires[0], null='' if field in ('string', 'text', 'password') else None) ``` should instead be: ``` requires[0] = validators.IS_EMPTY_OR(requires[0], null='' if field.type in ('string', 'text', 'password') else None) ``` Notice, `field.type` rather than `field`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gluon/dal.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 """ 5 | This file is part of the web2py Web Framework 6 | Copyrighted by Massimo Di Pierro <[email protected]> 7 | License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) 8 9 Takes care of adapting pyDAL to web2py's needs 10 ----------------------------------------------- 11 """ 12 13 from pydal import DAL as DAL 14 from pydal import Field 15 from pydal.objects import Row, Rows, Table, Query, Set, Expression 16 from pydal import SQLCustomType, geoPoint, geoLine, geoPolygon 17 18 def _default_validators(db, field): 19 """ 20 Field type validation, using web2py's validators mechanism. 21 22 makes sure the content of a field is in line with the declared 23 fieldtype 24 """ 25 from gluon import validators 26 field_type, field_length = field.type, field.length 27 requires = [] 28 29 if field_type in (('string', 'text', 'password')): 30 requires.append(validators.IS_LENGTH(field_length)) 31 elif field_type == 'json': 32 requires.append(validators.IS_EMPTY_OR(validators.IS_JSON())) 33 elif field_type == 'double' or field_type == 'float': 34 requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100)) 35 elif field_type == 'integer': 36 requires.append(validators.IS_INT_IN_RANGE(-2**31, 2**31)) 37 elif field_type == 'bigint': 38 requires.append(validators.IS_INT_IN_RANGE(-2**63, 2**63)) 39 elif field_type.startswith('decimal'): 40 requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10)) 41 elif field_type == 'date': 42 requires.append(validators.IS_DATE()) 43 elif field_type == 'time': 44 requires.append(validators.IS_TIME()) 45 elif field_type == 'datetime': 46 requires.append(validators.IS_DATETIME()) 47 elif db and field_type.startswith('reference') and \ 48 field_type.find('.') < 0 and \ 49 field_type[10:] in db.tables: 50 referenced = db[field_type[10:]] 51 if hasattr(referenced, '_format') and referenced._format: 52 requires = validators.IS_IN_DB(db, referenced._id, 53 referenced._format) 54 if field.unique: 55 requires._and = validators.IS_NOT_IN_DB(db, field) 56 if field.tablename == field_type[10:]: 57 return validators.IS_EMPTY_OR(requires) 58 return requires 59 elif db and field_type.startswith('list:reference') and \ 60 field_type.find('.') < 0 and \ 61 field_type[15:] in db.tables: 62 referenced = db[field_type[15:]] 63 if hasattr(referenced, '_format') and referenced._format: 64 requires = validators.IS_IN_DB(db, referenced._id, 65 referenced._format, multiple=True) 66 else: 67 requires = validators.IS_IN_DB(db, referenced._id, 68 multiple=True) 69 if field.unique: 70 requires._and = validators.IS_NOT_IN_DB(db, field) 71 if not field.notnull: 72 requires = validators.IS_EMPTY_OR(requires) 73 return requires 74 # does not get here for reference and list:reference 75 if field.unique: 76 requires.insert(0, validators.IS_NOT_IN_DB(db, field)) 77 excluded_fields = ['string', 'upload', 'text', 'password', 'boolean'] 78 if (field.notnull or field.unique) and not field_type in excluded_fields: 79 requires.insert(0, validators.IS_NOT_EMPTY()) 80 elif not field.notnull and not field.unique and requires: 81 requires[0] = validators.IS_EMPTY_OR(requires[0], null='' if field in ('string', 'text', 'password') else None) 82 return requires 83 84 from gluon.serializers import custom_json, xml 85 from gluon.utils import web2py_uuid 86 from gluon import sqlhtml 87 88 89 DAL.serializers = {'json': custom_json, 'xml': xml} 90 DAL.validators_method = _default_validators 91 DAL.uuid = lambda x: web2py_uuid() 92 DAL.representers = { 93 'rows_render': sqlhtml.represent, 94 'rows_xml': sqlhtml.SQLTABLE 95 } 96 DAL.Field = Field 97 DAL.Table = Table 98 99 #: add web2py contrib drivers to pyDAL 100 from pydal.drivers import DRIVERS 101 if not DRIVERS.get('pymysql'): 102 try: 103 from .contrib import pymysql 104 DRIVERS['pymysql'] = pymysql 105 except: 106 pass 107 if not DRIVERS.get('pyodbc'): 108 try: 109 from .contrib import pypyodbc as pyodbc 110 DRIVERS['pyodbc'] = pyodbc 111 except: 112 pass 113 if not DRIVERS.get('pg8000'): 114 try: 115 from .contrib import pg8000 116 DRIVERS['pg8000'] = pg8000 117 except: 118 pass 119 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gluon/dal.py b/gluon/dal.py --- a/gluon/dal.py +++ b/gluon/dal.py @@ -75,10 +75,10 @@ if field.unique: requires.insert(0, validators.IS_NOT_IN_DB(db, field)) excluded_fields = ['string', 'upload', 'text', 'password', 'boolean'] - if (field.notnull or field.unique) and not field_type in excluded_fields: + if (field.notnull or field.unique) and field_type not in excluded_fields: requires.insert(0, validators.IS_NOT_EMPTY()) elif not field.notnull and not field.unique and requires: - requires[0] = validators.IS_EMPTY_OR(requires[0], null='' if field in ('string', 'text', 'password') else None) + requires[0] = validators.IS_EMPTY_OR(requires[0], null='' if field.type in ('string', 'text', 'password') else None) return requires from gluon.serializers import custom_json, xml
{"golden_diff": "diff --git a/gluon/dal.py b/gluon/dal.py\n--- a/gluon/dal.py\n+++ b/gluon/dal.py\n@@ -75,10 +75,10 @@\n if field.unique:\n requires.insert(0, validators.IS_NOT_IN_DB(db, field))\n excluded_fields = ['string', 'upload', 'text', 'password', 'boolean']\n- if (field.notnull or field.unique) and not field_type in excluded_fields:\n+ if (field.notnull or field.unique) and field_type not in excluded_fields:\n requires.insert(0, validators.IS_NOT_EMPTY())\n elif not field.notnull and not field.unique and requires:\n- requires[0] = validators.IS_EMPTY_OR(requires[0], null='' if field in ('string', 'text', 'password') else None)\n+ requires[0] = validators.IS_EMPTY_OR(requires[0], null='' if field.type in ('string', 'text', 'password') else None)\n return requires\n \n from gluon.serializers import custom_json, xml\n", "issue": "IS_EMPTY_OR validator returns incorrect \"empty\" value\nWhen setting default validators, the https://github.com/web2py/web2py/commit/bdbc053285b67fd3ee02f2ea862b30ca495f33e2 commit mistakenly sets the `null` attribute of the `IS_EMPTY_OR` validator to `''` for _all_ field types rather than just the string based field types.\n\n[This line](https://github.com/web2py/web2py/blob/1877f497309e71918aa78e1a1288cbe3cb5392ec/gluon/dal.py#L81):\n\n```\n requires[0] = validators.IS_EMPTY_OR(requires[0], null='' if field in ('string', 'text', 'password') else None)\n```\n\nshould instead be:\n\n```\n requires[0] = validators.IS_EMPTY_OR(requires[0], null='' if field.type in ('string', 'text', 'password') else None)\n```\n\nNotice, `field.type` rather than `field`.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n| This file is part of the web2py Web Framework\n| Copyrighted by Massimo Di Pierro <[email protected]>\n| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)\n\nTakes care of adapting pyDAL to web2py's needs\n-----------------------------------------------\n\"\"\"\n\nfrom pydal import DAL as DAL\nfrom pydal import Field\nfrom pydal.objects import Row, Rows, Table, Query, Set, Expression\nfrom pydal import SQLCustomType, geoPoint, geoLine, geoPolygon\n\ndef _default_validators(db, field):\n \"\"\"\n Field type validation, using web2py's validators mechanism.\n\n makes sure the content of a field is in line with the declared\n fieldtype\n \"\"\"\n from gluon import validators\n field_type, field_length = field.type, field.length\n requires = []\n\n if field_type in (('string', 'text', 'password')):\n requires.append(validators.IS_LENGTH(field_length))\n elif field_type == 'json':\n requires.append(validators.IS_EMPTY_OR(validators.IS_JSON()))\n elif field_type == 'double' or field_type == 'float':\n requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))\n elif field_type == 'integer':\n requires.append(validators.IS_INT_IN_RANGE(-2**31, 2**31))\n elif field_type == 'bigint':\n requires.append(validators.IS_INT_IN_RANGE(-2**63, 2**63))\n elif field_type.startswith('decimal'):\n requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))\n elif field_type == 'date':\n requires.append(validators.IS_DATE())\n elif field_type == 'time':\n requires.append(validators.IS_TIME())\n elif field_type == 'datetime':\n requires.append(validators.IS_DATETIME())\n elif db and field_type.startswith('reference') and \\\n field_type.find('.') < 0 and \\\n field_type[10:] in db.tables:\n referenced = db[field_type[10:]]\n if hasattr(referenced, '_format') and referenced._format:\n requires = validators.IS_IN_DB(db, referenced._id,\n referenced._format)\n if field.unique:\n requires._and = validators.IS_NOT_IN_DB(db, field)\n if field.tablename == field_type[10:]:\n return validators.IS_EMPTY_OR(requires)\n return requires\n elif db and field_type.startswith('list:reference') and \\\n field_type.find('.') < 0 and \\\n field_type[15:] in db.tables:\n referenced = db[field_type[15:]]\n if hasattr(referenced, '_format') and referenced._format:\n requires = validators.IS_IN_DB(db, referenced._id,\n referenced._format, multiple=True)\n else:\n requires = validators.IS_IN_DB(db, referenced._id,\n multiple=True)\n if field.unique:\n requires._and = validators.IS_NOT_IN_DB(db, field)\n if not field.notnull:\n requires = validators.IS_EMPTY_OR(requires)\n return requires\n # does not get here for reference and list:reference\n if field.unique:\n requires.insert(0, validators.IS_NOT_IN_DB(db, field))\n excluded_fields = ['string', 'upload', 'text', 'password', 'boolean']\n if (field.notnull or field.unique) and not field_type in excluded_fields:\n requires.insert(0, validators.IS_NOT_EMPTY())\n elif not field.notnull and not field.unique and requires:\n requires[0] = validators.IS_EMPTY_OR(requires[0], null='' if field in ('string', 'text', 'password') else None)\n return requires\n\nfrom gluon.serializers import custom_json, xml\nfrom gluon.utils import web2py_uuid\nfrom gluon import sqlhtml\n\n\nDAL.serializers = {'json': custom_json, 'xml': xml}\nDAL.validators_method = _default_validators\nDAL.uuid = lambda x: web2py_uuid()\nDAL.representers = {\n 'rows_render': sqlhtml.represent,\n 'rows_xml': sqlhtml.SQLTABLE\n}\nDAL.Field = Field\nDAL.Table = Table\n\n#: add web2py contrib drivers to pyDAL\nfrom pydal.drivers import DRIVERS\nif not DRIVERS.get('pymysql'):\n try:\n from .contrib import pymysql\n DRIVERS['pymysql'] = pymysql\n except:\n pass\nif not DRIVERS.get('pyodbc'):\n try:\n from .contrib import pypyodbc as pyodbc\n DRIVERS['pyodbc'] = pyodbc\n except:\n pass\nif not DRIVERS.get('pg8000'):\n try:\n from .contrib import pg8000\n DRIVERS['pg8000'] = pg8000\n except:\n pass\n", "path": "gluon/dal.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n| This file is part of the web2py Web Framework\n| Copyrighted by Massimo Di Pierro <[email protected]>\n| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)\n\nTakes care of adapting pyDAL to web2py's needs\n-----------------------------------------------\n\"\"\"\n\nfrom pydal import DAL as DAL\nfrom pydal import Field\nfrom pydal.objects import Row, Rows, Table, Query, Set, Expression\nfrom pydal import SQLCustomType, geoPoint, geoLine, geoPolygon\n\ndef _default_validators(db, field):\n \"\"\"\n Field type validation, using web2py's validators mechanism.\n\n makes sure the content of a field is in line with the declared\n fieldtype\n \"\"\"\n from gluon import validators\n field_type, field_length = field.type, field.length\n requires = []\n\n if field_type in (('string', 'text', 'password')):\n requires.append(validators.IS_LENGTH(field_length))\n elif field_type == 'json':\n requires.append(validators.IS_EMPTY_OR(validators.IS_JSON()))\n elif field_type == 'double' or field_type == 'float':\n requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))\n elif field_type == 'integer':\n requires.append(validators.IS_INT_IN_RANGE(-2**31, 2**31))\n elif field_type == 'bigint':\n requires.append(validators.IS_INT_IN_RANGE(-2**63, 2**63))\n elif field_type.startswith('decimal'):\n requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))\n elif field_type == 'date':\n requires.append(validators.IS_DATE())\n elif field_type == 'time':\n requires.append(validators.IS_TIME())\n elif field_type == 'datetime':\n requires.append(validators.IS_DATETIME())\n elif db and field_type.startswith('reference') and \\\n field_type.find('.') < 0 and \\\n field_type[10:] in db.tables:\n referenced = db[field_type[10:]]\n if hasattr(referenced, '_format') and referenced._format:\n requires = validators.IS_IN_DB(db, referenced._id,\n referenced._format)\n if field.unique:\n requires._and = validators.IS_NOT_IN_DB(db, field)\n if field.tablename == field_type[10:]:\n return validators.IS_EMPTY_OR(requires)\n return requires\n elif db and field_type.startswith('list:reference') and \\\n field_type.find('.') < 0 and \\\n field_type[15:] in db.tables:\n referenced = db[field_type[15:]]\n if hasattr(referenced, '_format') and referenced._format:\n requires = validators.IS_IN_DB(db, referenced._id,\n referenced._format, multiple=True)\n else:\n requires = validators.IS_IN_DB(db, referenced._id,\n multiple=True)\n if field.unique:\n requires._and = validators.IS_NOT_IN_DB(db, field)\n if not field.notnull:\n requires = validators.IS_EMPTY_OR(requires)\n return requires\n # does not get here for reference and list:reference\n if field.unique:\n requires.insert(0, validators.IS_NOT_IN_DB(db, field))\n excluded_fields = ['string', 'upload', 'text', 'password', 'boolean']\n if (field.notnull or field.unique) and field_type not in excluded_fields:\n requires.insert(0, validators.IS_NOT_EMPTY())\n elif not field.notnull and not field.unique and requires:\n requires[0] = validators.IS_EMPTY_OR(requires[0], null='' if field.type in ('string', 'text', 'password') else None)\n return requires\n\nfrom gluon.serializers import custom_json, xml\nfrom gluon.utils import web2py_uuid\nfrom gluon import sqlhtml\n\n\nDAL.serializers = {'json': custom_json, 'xml': xml}\nDAL.validators_method = _default_validators\nDAL.uuid = lambda x: web2py_uuid()\nDAL.representers = {\n 'rows_render': sqlhtml.represent,\n 'rows_xml': sqlhtml.SQLTABLE\n}\nDAL.Field = Field\nDAL.Table = Table\n\n#: add web2py contrib drivers to pyDAL\nfrom pydal.drivers import DRIVERS\nif not DRIVERS.get('pymysql'):\n try:\n from .contrib import pymysql\n DRIVERS['pymysql'] = pymysql\n except:\n pass\nif not DRIVERS.get('pyodbc'):\n try:\n from .contrib import pypyodbc as pyodbc\n DRIVERS['pyodbc'] = pyodbc\n except:\n pass\nif not DRIVERS.get('pg8000'):\n try:\n from .contrib import pg8000\n DRIVERS['pg8000'] = pg8000\n except:\n pass\n", "path": "gluon/dal.py"}]}
1,832
233
gh_patches_debug_4775
rasdani/github-patches
git_diff
nilearn__nilearn-1949
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- line_width param in view_connectome has no effect The parameter `line_width` in `html_connectome.view_connectome()` is not associated with any functionality. Should we remove it or is there a reason for it to be there? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nilearn/plotting/html_connectome.py` Content: ``` 1 import functools 2 import json 3 import warnings 4 5 import numpy as np 6 from scipy import sparse 7 8 from nilearn._utils import replace_parameters 9 from .. import datasets 10 from . import cm 11 12 from .js_plotting_utils import (add_js_lib, HTMLDocument, mesh_to_plotly, 13 encode, colorscale, get_html_template, 14 to_color_strings) 15 16 17 class ConnectomeView(HTMLDocument): 18 pass 19 20 21 def _prepare_line(edges, nodes): 22 path_edges = np.zeros(len(edges) * 3, dtype=int) 23 path_edges[::3] = edges 24 path_edges[1::3] = edges 25 path_nodes = np.zeros(len(nodes) * 3, dtype=int) 26 path_nodes[::3] = nodes[:, 0] 27 path_nodes[1::3] = nodes[:, 1] 28 return path_edges, path_nodes 29 30 31 def _get_connectome(adjacency_matrix, coords, threshold=None, 32 marker_size=None, cmap=cm.cold_hot, symmetric_cmap=True): 33 connectome = {} 34 coords = np.asarray(coords, dtype='<f4') 35 adjacency_matrix = adjacency_matrix.copy() 36 colors = colorscale( 37 cmap, adjacency_matrix.ravel(), threshold=threshold, 38 symmetric_cmap=symmetric_cmap) 39 connectome['colorscale'] = colors['colors'] 40 connectome['cmin'] = float(colors['vmin']) 41 connectome['cmax'] = float(colors['vmax']) 42 if threshold is not None: 43 adjacency_matrix[ 44 np.abs(adjacency_matrix) <= colors['abs_threshold']] = 0 45 s = sparse.coo_matrix(adjacency_matrix) 46 nodes = np.asarray([s.row, s.col], dtype=int).T 47 edges = np.arange(len(nodes)) 48 path_edges, path_nodes = _prepare_line(edges, nodes) 49 connectome["_con_w"] = encode(np.asarray(s.data, dtype='<f4')[path_edges]) 50 c = coords[path_nodes] 51 if np.ndim(marker_size) > 0: 52 marker_size = np.asarray(marker_size) 53 marker_size = marker_size[path_nodes] 54 x, y, z = c.T 55 for coord, cname in [(x, "x"), (y, "y"), (z, "z")]: 56 connectome["_con_{}".format(cname)] = encode( 57 np.asarray(coord, dtype='<f4')) 58 connectome["markers_only"] = False 59 if hasattr(marker_size, 'tolist'): 60 marker_size = marker_size.tolist() 61 connectome['marker_size'] = marker_size 62 return connectome 63 64 65 def _get_markers(coords, colors): 66 connectome = {} 67 coords = np.asarray(coords, dtype='<f4') 68 x, y, z = coords.T 69 for coord, cname in [(x, "x"), (y, "y"), (z, "z")]: 70 connectome["_con_{}".format(cname)] = encode( 71 np.asarray(coord, dtype='<f4')) 72 connectome["marker_color"] = to_color_strings(colors) 73 connectome["markers_only"] = True 74 return connectome 75 76 77 def _make_connectome_html(connectome_info, embed_js=True): 78 plot_info = {"connectome": connectome_info} 79 mesh = datasets.fetch_surf_fsaverage() 80 for hemi in ['pial_left', 'pial_right']: 81 plot_info[hemi] = mesh_to_plotly(mesh[hemi]) 82 as_json = json.dumps(plot_info) 83 as_html = get_html_template( 84 'connectome_plot_template.html').safe_substitute( 85 {'INSERT_CONNECTOME_JSON_HERE': as_json}) 86 as_html = add_js_lib(as_html, embed_js=embed_js) 87 return ConnectomeView(as_html) 88 89 90 def _replacement_params_view_connectome(): 91 """ Returns a dict containing deprecated & replacement parameters 92 as key-value pair for view_connectome(). 93 Avoids cluttering the global namespace. 94 """ 95 return { 96 'coords': 'node_coords', 97 'threshold': 'edge_threshold', 98 'cmap': 'edge_cmap', 99 'marker_size': 'node_size', 100 } 101 102 @replace_parameters(replacement_params=_replacement_params_view_connectome(), 103 end_version='0.6.0', 104 lib_name='Nilearn', 105 ) 106 def view_connectome(adjacency_matrix, node_coords, edge_threshold=None, 107 edge_cmap=cm.bwr, symmetric_cmap=True, 108 linewidth=6., node_size=3., 109 ): 110 """ 111 Insert a 3d plot of a connectome into an HTML page. 112 113 Parameters 114 ---------- 115 adjacency_matrix : ndarray, shape=(n_nodes, n_nodes) 116 the weights of the edges. 117 118 node_coords : ndarray, shape=(n_nodes, 3) 119 the coordinates of the nodes in MNI space. 120 121 edge_threshold : str, number or None, optional (default=None) 122 If None, no thresholding. 123 If it is a number only connections of amplitude greater 124 than threshold will be shown. 125 If it is a string it must finish with a percent sign, 126 e.g. "25.3%", and only connections of amplitude above the 127 given percentile will be shown. 128 129 edge_cmap : str or matplotlib colormap, optional 130 131 symmetric_cmap : bool, optional (default=True) 132 Make colormap symmetric (ranging from -vmax to vmax). 133 134 linewidth : float, optional (default=6.) 135 Width of the lines that show connections. 136 137 node_size : float, optional (default=3.) 138 Size of the markers showing the seeds in pixels. 139 140 Returns 141 ------- 142 ConnectomeView : plot of the connectome. 143 It can be saved as an html page or rendered (transparently) by the 144 Jupyter notebook. Useful methods are : 145 146 - 'resize' to resize the plot displayed in a Jupyter notebook 147 - 'save_as_html' to save the plot to a file 148 - 'open_in_browser' to save the plot and open it in a web browser. 149 150 See Also 151 -------- 152 nilearn.plotting.plot_connectome: 153 projected views of a connectome in a glass brain. 154 155 nilearn.plotting.view_markers: 156 interactive plot of colored markers 157 158 nilearn.plotting.view_surf, nilearn.plotting.view_img_on_surf: 159 interactive view of statistical maps or surface atlases on the cortical 160 surface. 161 162 """ 163 connectome_info = _get_connectome( 164 adjacency_matrix, node_coords, threshold=edge_threshold, cmap=edge_cmap, 165 symmetric_cmap=symmetric_cmap, marker_size=node_size) 166 return _make_connectome_html(connectome_info) 167 168 169 def _replacement_params_view_markers(): 170 """ Returns a dict containing deprecated & replacement parameters 171 as key-value pair for view_markers(). 172 Avoids cluttering the global namespace. 173 """ 174 return {'coords': 'marker_coords', 175 'colors': 'marker_color', 176 } 177 178 179 @replace_parameters(replacement_params=_replacement_params_view_markers(), 180 end_version='0.6.0', 181 lib_name='Nilearn', 182 ) 183 def view_markers(marker_coords, marker_color=None, marker_size=5.): 184 """ 185 Insert a 3d plot of markers in a brain into an HTML page. 186 187 Parameters 188 ---------- 189 marker_coords : ndarray, shape=(n_nodes, 3) 190 the coordinates of the nodes in MNI space. 191 192 marker_color : ndarray, shape=(n_nodes,) 193 colors of the markers: list of strings, hex rgb or rgba strings, rgb 194 triplets, or rgba triplets (i.e. formats accepted by matplotlib, see 195 https://matplotlib.org/users/colors.html#specifying-colors) 196 197 marker_size : float or array-like, optional (default=3.) 198 Size of the markers showing the seeds in pixels. 199 200 Returns 201 ------- 202 ConnectomeView : plot of the markers. 203 It can be saved as an html page or rendered (transparently) by the 204 Jupyter notebook. Useful methods are : 205 206 - 'resize' to resize the plot displayed in a Jupyter notebook 207 - 'save_as_html' to save the plot to a file 208 - 'open_in_browser' to save the plot and open it in a web browser. 209 210 See Also 211 -------- 212 nilearn.plotting.plot_connectome: 213 projected views of a connectome in a glass brain. 214 215 nilearn.plotting.view_connectome: 216 interactive plot of a connectome. 217 218 nilearn.plotting.view_surf, nilearn.plotting.view_img_on_surf: 219 interactive view of statistical maps or surface atlases on the cortical 220 surface. 221 222 """ 223 if marker_color is None: 224 marker_color = ['red' for i in range(len(marker_coords))] 225 connectome_info = _get_markers(marker_coords, marker_color) 226 if hasattr(marker_size, 'tolist'): 227 marker_size = marker_size.tolist() 228 connectome_info["marker_size"] = marker_size 229 return _make_connectome_html(connectome_info) 230 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nilearn/plotting/html_connectome.py b/nilearn/plotting/html_connectome.py --- a/nilearn/plotting/html_connectome.py +++ b/nilearn/plotting/html_connectome.py @@ -159,6 +159,7 @@ connectome_info = _get_connectome( adjacency_matrix, node_coords, threshold=edge_threshold, cmap=edge_cmap, symmetric_cmap=symmetric_cmap, marker_size=node_size) + connectome_info['line_width'] = linewidth return _make_connectome_html(connectome_info)
{"golden_diff": "diff --git a/nilearn/plotting/html_connectome.py b/nilearn/plotting/html_connectome.py\n--- a/nilearn/plotting/html_connectome.py\n+++ b/nilearn/plotting/html_connectome.py\n@@ -159,6 +159,7 @@\n connectome_info = _get_connectome(\n adjacency_matrix, node_coords, threshold=edge_threshold, cmap=edge_cmap,\n symmetric_cmap=symmetric_cmap, marker_size=node_size)\n+ connectome_info['line_width'] = linewidth\n return _make_connectome_html(connectome_info)\n", "issue": "line_width param in view_connectome has no effect\nThe parameter `line_width` in `html_connectome.view_connectome()` is not associated with any functionality.\r\nShould we remove it or is there a reason for it to be there?\n", "before_files": [{"content": "import functools\nimport json\nimport warnings\n\nimport numpy as np\nfrom scipy import sparse\n\nfrom nilearn._utils import replace_parameters\nfrom .. import datasets\nfrom . import cm\n\nfrom .js_plotting_utils import (add_js_lib, HTMLDocument, mesh_to_plotly,\n encode, colorscale, get_html_template,\n to_color_strings)\n\n\nclass ConnectomeView(HTMLDocument):\n pass\n\n\ndef _prepare_line(edges, nodes):\n path_edges = np.zeros(len(edges) * 3, dtype=int)\n path_edges[::3] = edges\n path_edges[1::3] = edges\n path_nodes = np.zeros(len(nodes) * 3, dtype=int)\n path_nodes[::3] = nodes[:, 0]\n path_nodes[1::3] = nodes[:, 1]\n return path_edges, path_nodes\n\n\ndef _get_connectome(adjacency_matrix, coords, threshold=None,\n marker_size=None, cmap=cm.cold_hot, symmetric_cmap=True):\n connectome = {}\n coords = np.asarray(coords, dtype='<f4')\n adjacency_matrix = adjacency_matrix.copy()\n colors = colorscale(\n cmap, adjacency_matrix.ravel(), threshold=threshold,\n symmetric_cmap=symmetric_cmap)\n connectome['colorscale'] = colors['colors']\n connectome['cmin'] = float(colors['vmin'])\n connectome['cmax'] = float(colors['vmax'])\n if threshold is not None:\n adjacency_matrix[\n np.abs(adjacency_matrix) <= colors['abs_threshold']] = 0\n s = sparse.coo_matrix(adjacency_matrix)\n nodes = np.asarray([s.row, s.col], dtype=int).T\n edges = np.arange(len(nodes))\n path_edges, path_nodes = _prepare_line(edges, nodes)\n connectome[\"_con_w\"] = encode(np.asarray(s.data, dtype='<f4')[path_edges])\n c = coords[path_nodes]\n if np.ndim(marker_size) > 0:\n marker_size = np.asarray(marker_size)\n marker_size = marker_size[path_nodes]\n x, y, z = c.T\n for coord, cname in [(x, \"x\"), (y, \"y\"), (z, \"z\")]:\n connectome[\"_con_{}\".format(cname)] = encode(\n np.asarray(coord, dtype='<f4'))\n connectome[\"markers_only\"] = False\n if hasattr(marker_size, 'tolist'):\n marker_size = marker_size.tolist()\n connectome['marker_size'] = marker_size\n return connectome\n\n\ndef _get_markers(coords, colors):\n connectome = {}\n coords = np.asarray(coords, dtype='<f4')\n x, y, z = coords.T\n for coord, cname in [(x, \"x\"), (y, \"y\"), (z, \"z\")]:\n connectome[\"_con_{}\".format(cname)] = encode(\n np.asarray(coord, dtype='<f4'))\n connectome[\"marker_color\"] = to_color_strings(colors)\n connectome[\"markers_only\"] = True\n return connectome\n\n\ndef _make_connectome_html(connectome_info, embed_js=True):\n plot_info = {\"connectome\": connectome_info}\n mesh = datasets.fetch_surf_fsaverage()\n for hemi in ['pial_left', 'pial_right']:\n plot_info[hemi] = mesh_to_plotly(mesh[hemi])\n as_json = json.dumps(plot_info)\n as_html = get_html_template(\n 'connectome_plot_template.html').safe_substitute(\n {'INSERT_CONNECTOME_JSON_HERE': as_json})\n as_html = add_js_lib(as_html, embed_js=embed_js)\n return ConnectomeView(as_html)\n\n\ndef _replacement_params_view_connectome():\n \"\"\" Returns a dict containing deprecated & replacement parameters\n as key-value pair for view_connectome().\n Avoids cluttering the global namespace.\n \"\"\"\n return {\n 'coords': 'node_coords',\n 'threshold': 'edge_threshold',\n 'cmap': 'edge_cmap',\n 'marker_size': 'node_size',\n }\n\n@replace_parameters(replacement_params=_replacement_params_view_connectome(),\n end_version='0.6.0',\n lib_name='Nilearn',\n )\ndef view_connectome(adjacency_matrix, node_coords, edge_threshold=None,\n edge_cmap=cm.bwr, symmetric_cmap=True,\n linewidth=6., node_size=3.,\n ):\n \"\"\"\n Insert a 3d plot of a connectome into an HTML page.\n\n Parameters\n ----------\n adjacency_matrix : ndarray, shape=(n_nodes, n_nodes)\n the weights of the edges.\n\n node_coords : ndarray, shape=(n_nodes, 3)\n the coordinates of the nodes in MNI space.\n\n edge_threshold : str, number or None, optional (default=None)\n If None, no thresholding.\n If it is a number only connections of amplitude greater\n than threshold will be shown.\n If it is a string it must finish with a percent sign,\n e.g. \"25.3%\", and only connections of amplitude above the\n given percentile will be shown.\n\n edge_cmap : str or matplotlib colormap, optional\n\n symmetric_cmap : bool, optional (default=True)\n Make colormap symmetric (ranging from -vmax to vmax).\n\n linewidth : float, optional (default=6.)\n Width of the lines that show connections.\n\n node_size : float, optional (default=3.)\n Size of the markers showing the seeds in pixels.\n\n Returns\n -------\n ConnectomeView : plot of the connectome.\n It can be saved as an html page or rendered (transparently) by the\n Jupyter notebook. Useful methods are :\n\n - 'resize' to resize the plot displayed in a Jupyter notebook\n - 'save_as_html' to save the plot to a file\n - 'open_in_browser' to save the plot and open it in a web browser.\n\n See Also\n --------\n nilearn.plotting.plot_connectome:\n projected views of a connectome in a glass brain.\n\n nilearn.plotting.view_markers:\n interactive plot of colored markers\n\n nilearn.plotting.view_surf, nilearn.plotting.view_img_on_surf:\n interactive view of statistical maps or surface atlases on the cortical\n surface.\n\n \"\"\"\n connectome_info = _get_connectome(\n adjacency_matrix, node_coords, threshold=edge_threshold, cmap=edge_cmap,\n symmetric_cmap=symmetric_cmap, marker_size=node_size)\n return _make_connectome_html(connectome_info)\n\n\ndef _replacement_params_view_markers():\n \"\"\" Returns a dict containing deprecated & replacement parameters\n as key-value pair for view_markers().\n Avoids cluttering the global namespace.\n \"\"\"\n return {'coords': 'marker_coords',\n 'colors': 'marker_color',\n }\n\n\n@replace_parameters(replacement_params=_replacement_params_view_markers(),\n end_version='0.6.0',\n lib_name='Nilearn',\n )\ndef view_markers(marker_coords, marker_color=None, marker_size=5.):\n \"\"\"\n Insert a 3d plot of markers in a brain into an HTML page.\n\n Parameters\n ----------\n marker_coords : ndarray, shape=(n_nodes, 3)\n the coordinates of the nodes in MNI space.\n\n marker_color : ndarray, shape=(n_nodes,)\n colors of the markers: list of strings, hex rgb or rgba strings, rgb\n triplets, or rgba triplets (i.e. formats accepted by matplotlib, see\n https://matplotlib.org/users/colors.html#specifying-colors)\n\n marker_size : float or array-like, optional (default=3.)\n Size of the markers showing the seeds in pixels.\n\n Returns\n -------\n ConnectomeView : plot of the markers.\n It can be saved as an html page or rendered (transparently) by the\n Jupyter notebook. Useful methods are :\n\n - 'resize' to resize the plot displayed in a Jupyter notebook\n - 'save_as_html' to save the plot to a file\n - 'open_in_browser' to save the plot and open it in a web browser.\n\n See Also\n --------\n nilearn.plotting.plot_connectome:\n projected views of a connectome in a glass brain.\n\n nilearn.plotting.view_connectome:\n interactive plot of a connectome.\n\n nilearn.plotting.view_surf, nilearn.plotting.view_img_on_surf:\n interactive view of statistical maps or surface atlases on the cortical\n surface.\n\n \"\"\"\n if marker_color is None:\n marker_color = ['red' for i in range(len(marker_coords))]\n connectome_info = _get_markers(marker_coords, marker_color)\n if hasattr(marker_size, 'tolist'):\n marker_size = marker_size.tolist()\n connectome_info[\"marker_size\"] = marker_size\n return _make_connectome_html(connectome_info)\n", "path": "nilearn/plotting/html_connectome.py"}], "after_files": [{"content": "import functools\nimport json\nimport warnings\n\nimport numpy as np\nfrom scipy import sparse\nfrom .. import datasets\nfrom . import cm\n\nfrom .js_plotting_utils import (add_js_lib, HTMLDocument, mesh_to_plotly,\n encode, colorscale, get_html_template,\n to_color_strings)\n\n\nclass ConnectomeView(HTMLDocument):\n pass\n\n\ndef _prepare_line(edges, nodes):\n path_edges = np.zeros(len(edges) * 3, dtype=int)\n path_edges[::3] = edges\n path_edges[1::3] = edges\n path_nodes = np.zeros(len(nodes) * 3, dtype=int)\n path_nodes[::3] = nodes[:, 0]\n path_nodes[1::3] = nodes[:, 1]\n return path_edges, path_nodes\n\n\ndef _get_connectome(adjacency_matrix, coords, threshold=None,\n marker_size=None, cmap=cm.cold_hot, symmetric_cmap=True):\n connectome = {}\n coords = np.asarray(coords, dtype='<f4')\n adjacency_matrix = adjacency_matrix.copy()\n colors = colorscale(\n cmap, adjacency_matrix.ravel(), threshold=threshold,\n symmetric_cmap=symmetric_cmap)\n connectome['colorscale'] = colors['colors']\n connectome['cmin'] = float(colors['vmin'])\n connectome['cmax'] = float(colors['vmax'])\n if threshold is not None:\n adjacency_matrix[\n np.abs(adjacency_matrix) <= colors['abs_threshold']] = 0\n s = sparse.coo_matrix(adjacency_matrix)\n nodes = np.asarray([s.row, s.col], dtype=int).T\n edges = np.arange(len(nodes))\n path_edges, path_nodes = _prepare_line(edges, nodes)\n connectome[\"_con_w\"] = encode(np.asarray(s.data, dtype='<f4')[path_edges])\n c = coords[path_nodes]\n if np.ndim(marker_size) > 0:\n marker_size = np.asarray(marker_size)\n marker_size = marker_size[path_nodes]\n x, y, z = c.T\n for coord, cname in [(x, \"x\"), (y, \"y\"), (z, \"z\")]:\n connectome[\"_con_{}\".format(cname)] = encode(\n np.asarray(coord, dtype='<f4'))\n connectome[\"markers_only\"] = False\n if hasattr(marker_size, 'tolist'):\n marker_size = marker_size.tolist()\n connectome['marker_size'] = marker_size\n return connectome\n\n\ndef _get_markers(coords, colors):\n connectome = {}\n coords = np.asarray(coords, dtype='<f4')\n x, y, z = coords.T\n for coord, cname in [(x, \"x\"), (y, \"y\"), (z, \"z\")]:\n connectome[\"_con_{}\".format(cname)] = encode(\n np.asarray(coord, dtype='<f4'))\n connectome[\"marker_color\"] = to_color_strings(colors)\n connectome[\"markers_only\"] = True\n return connectome\n\n\ndef _make_connectome_html(connectome_info, embed_js=True):\n plot_info = {\"connectome\": connectome_info}\n mesh = datasets.fetch_surf_fsaverage()\n for hemi in ['pial_left', 'pial_right']:\n plot_info[hemi] = mesh_to_plotly(mesh[hemi])\n as_json = json.dumps(plot_info)\n as_html = get_html_template(\n 'connectome_plot_template.html').safe_substitute(\n {'INSERT_CONNECTOME_JSON_HERE': as_json})\n as_html = add_js_lib(as_html, embed_js=embed_js)\n return ConnectomeView(as_html)\n\n\ndef _deprecate_params_view_connectome(func):\n \"\"\" Decorator to deprecate specific parameters in view_connectome()\n without modifying view_connectome().\n \"\"\"\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n _warn_deprecated_params_view_connectome(kwargs)\n kwargs = _transfer_deprecated_param_vals_view_connectome(kwargs)\n return func(*args, **kwargs)\n \n return wrapper\n\n\n@_deprecate_params_view_connectome\ndef view_connectome(adjacency_matrix, node_coords, edge_threshold=None,\n edge_cmap=cm.bwr, symmetric_cmap=True,\n linewidth=6., node_size=3.,\n **kwargs):\n \"\"\"\n Insert a 3d plot of a connectome into an HTML page.\n\n Parameters\n ----------\n adjacency_matrix : ndarray, shape=(n_nodes, n_nodes)\n the weights of the edges.\n\n node_coords : ndarray, shape=(n_nodes, 3)\n the coordinates of the nodes in MNI space.\n\n edge_threshold : str, number or None, optional (default=None)\n If None, no thresholding.\n If it is a number only connections of amplitude greater\n than threshold will be shown.\n If it is a string it must finish with a percent sign,\n e.g. \"25.3%\", and only connections of amplitude above the\n given percentile will be shown.\n\n edge_cmap : str or matplotlib colormap, optional\n\n symmetric_cmap : bool, optional (default=True)\n Make colormap symmetric (ranging from -vmax to vmax).\n\n linewidth : float, optional (default=6.)\n Width of the lines that show connections.\n\n node_size : float, optional (default=3.)\n Size of the markers showing the seeds in pixels.\n\n Returns\n -------\n ConnectomeView : plot of the connectome.\n It can be saved as an html page or rendered (transparently) by the\n Jupyter notebook. Useful methods are :\n\n - 'resize' to resize the plot displayed in a Jupyter notebook\n - 'save_as_html' to save the plot to a file\n - 'open_in_browser' to save the plot and open it in a web browser.\n\n See Also\n --------\n nilearn.plotting.plot_connectome:\n projected views of a connectome in a glass brain.\n\n nilearn.plotting.view_markers:\n interactive plot of colored markers\n\n nilearn.plotting.view_surf, nilearn.plotting.view_img_on_surf:\n interactive view of statistical maps or surface atlases on the cortical\n surface.\n\n \"\"\"\n connectome_info = _get_connectome(\n adjacency_matrix, node_coords, threshold=edge_threshold, cmap=edge_cmap,\n symmetric_cmap=symmetric_cmap, marker_size=node_size)\n connectome_info['line_width'] = linewidth\n return _make_connectome_html(connectome_info)\n\n\ndef _warn_deprecated_params_view_connectome(kwargs):\n \"\"\" For view_connectome(), raises warnings about deprecated parameters.\n \"\"\"\n all_deprecated_params = {'coords': 'node_coords',\n 'threshold': 'edge_threshold',\n 'cmap': 'edge_cmap',\n 'marker_size': 'node_size',\n }\n used_deprecated_params = set(kwargs).intersection(all_deprecated_params)\n for deprecated_param_ in used_deprecated_params:\n replacement_param = all_deprecated_params[deprecated_param_]\n param_deprecation_msg = (\n 'The parameter \"{}\" will be removed in Nilearn version 0.6.0. '\n 'Please use the parameter \"{}\" instead.'.format(deprecated_param_,\n replacement_param,\n )\n )\n warnings.filterwarnings('always', message=param_deprecation_msg)\n warnings.warn(category=DeprecationWarning,\n message=param_deprecation_msg,\n stacklevel=3)\n\n\ndef _transfer_deprecated_param_vals_view_connectome(kwargs):\n \"\"\" For view_connectome(), reassigns new parameters the values passed\n to their corresponding deprecated parameters.\n \"\"\"\n coords = kwargs.get('coords', None)\n threshold = kwargs.get('threshold', None)\n cmap = kwargs.get('cmap', None)\n marker_size = kwargs.get('marker_size', None)\n \n if coords is not None:\n kwargs['node_coords'] = coords\n if threshold is not None:\n kwargs['edge_threshold'] = threshold\n if cmap is not None:\n kwargs['edge_cmap'] = cmap\n if marker_size is not None:\n kwargs['node_size'] = marker_size\n return kwargs\n\n\ndef _deprecate_params_view_markers(func):\n \"\"\" Decorator to deprecate specific parameters in view_markers()\n without modifying view_markers().\n \"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n _warn_deprecated_params_view_markers(kwargs)\n kwargs = _transfer_deprecated_param_vals_view_markers(kwargs)\n return func(*args, **kwargs)\n return wrapper\n\n\n@_deprecate_params_view_markers\ndef view_markers(marker_coords, marker_color=None, marker_size=5., **kwargs):\n \"\"\"\n Insert a 3d plot of markers in a brain into an HTML page.\n\n Parameters\n ----------\n marker_coords : ndarray, shape=(n_nodes, 3)\n the coordinates of the nodes in MNI space.\n\n marker_color : ndarray, shape=(n_nodes,)\n colors of the markers: list of strings, hex rgb or rgba strings, rgb\n triplets, or rgba triplets (i.e. formats accepted by matplotlib, see\n https://matplotlib.org/users/colors.html#specifying-colors)\n\n marker_size : float or array-like, optional (default=3.)\n Size of the markers showing the seeds in pixels.\n\n Returns\n -------\n ConnectomeView : plot of the markers.\n It can be saved as an html page or rendered (transparently) by the\n Jupyter notebook. Useful methods are :\n\n - 'resize' to resize the plot displayed in a Jupyter notebook\n - 'save_as_html' to save the plot to a file\n - 'open_in_browser' to save the plot and open it in a web browser.\n\n See Also\n --------\n nilearn.plotting.plot_connectome:\n projected views of a connectome in a glass brain.\n\n nilearn.plotting.view_connectome:\n interactive plot of a connectome.\n\n nilearn.plotting.view_surf, nilearn.plotting.view_img_on_surf:\n interactive view of statistical maps or surface atlases on the cortical\n surface.\n\n \"\"\"\n if marker_color is None:\n marker_color = ['red' for i in range(len(marker_coords))]\n connectome_info = _get_markers(marker_coords, marker_color)\n if hasattr(marker_size, 'tolist'):\n marker_size = marker_size.tolist()\n connectome_info[\"marker_size\"] = marker_size\n return _make_connectome_html(connectome_info)\n\n\ndef _warn_deprecated_params_view_markers(kwargs):\n \"\"\" For view_markers(), raises warnings about deprecated parameters.\n \"\"\"\n\n all_deprecated_params = {'coords': 'marker_coords',\n 'colors': 'marker_color',\n }\n used_dperecated_params = set(kwargs).intersection(all_deprecated_params)\n for deprecated_param_ in used_dperecated_params:\n replacement_param = all_deprecated_params[deprecated_param_]\n param_deprecation_msg = (\n 'The parameter \"{}\" will be removed in Nilearn version 0.6.0. '\n 'Please use the parameter \"{}\" instead.'.format(deprecated_param_,\n replacement_param,\n )\n )\n warnings.filterwarnings('always', message=param_deprecation_msg)\n warnings.warn(category=DeprecationWarning,\n message=param_deprecation_msg,\n stacklevel=3,\n )\n\n\ndef _transfer_deprecated_param_vals_view_markers(kwargs):\n \"\"\" For view_markers(), reassigns new parameters the values passed\n to their corresponding deprecated parameters.\n \"\"\"\n coords = kwargs.get('coords', None)\n colors = kwargs.get('colors', None)\n \n if coords is not None:\n kwargs['marker_coords'] = coords\n if colors is not None:\n kwargs['marker_color'] = colors\n return kwargs\n", "path": "nilearn/plotting/html_connectome.py"}]}
2,823
129
gh_patches_debug_7174
rasdani/github-patches
git_diff
cowrie__cowrie-1054
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Bug in csirtg plugin @wesyoung Not sure when this bug started, but just looked today at my honeypots and saw this happening all over the place in the logs. ``` 2018-02-11T16:53:14-0500 [twisted.internet.defer#critical] Unhandled error in Deferred: 2018-02-11T16:53:14-0500 [twisted.internet.defer#critical] Traceback (most recent call last): File "/usr/local/lib/python2.7/dist-packages/twisted/internet/tcp.py", line 289, in connectionLost protocol.connectionLost(reason) File "/usr/local/lib/python2.7/dist-packages/twisted/web/client.py", line 223, in connectionLost self.factory._disconnectedDeferred.callback(None) File "/usr/local/lib/python2.7/dist-packages/twisted/internet/defer.py", line 459, in callback self._startRunCallbacks(result) File "/usr/local/lib/python2.7/dist-packages/twisted/internet/defer.py", line 567, in _startRunCallbacks self._runCallbacks() --- <exception caught here> --- File "/usr/local/lib/python2.7/dist-packages/twisted/internet/defer.py", line 653, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/cowrie/cowrie/cowrie/commands/wget.py", line 241, in error url=self.url) File "/home/cowrie/cowrie/cowrie/shell/protocol.py", line 80, in logDispatch pt.factory.logDispatch(*msg, **args) File "/home/cowrie/cowrie/cowrie/telnet/transport.py", line 43, in logDispatch output.logDispatch(*msg, **args) File "/home/cowrie/cowrie/cowrie/core/output.py", line 117, in logDispatch self.emit(ev) File "/home/cowrie/cowrie/cowrie/core/output.py", line 206, in emit self.write(ev) File "/home/cowrie/cowrie/cowrie/output/csirtg.py", line 43, in write system = e['system'] exceptions.KeyError: 'system' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/cowrie/output/csirtg.py` Content: ``` 1 from __future__ import absolute_import, division 2 3 import os 4 from datetime import datetime 5 6 from csirtgsdk.client import Client 7 from csirtgsdk.indicator import Indicator 8 9 from twisted.python import log 10 11 import cowrie.core.output 12 from cowrie.core.config import CONFIG 13 14 USERNAME = os.environ.get('CSIRTG_USER') 15 FEED = os.environ.get('CSIRTG_FEED') 16 TOKEN = os.environ.get('CSIRG_TOKEN') 17 DESCRIPTION = os.environ.get('CSIRTG_DESCRIPTION', 'random scanning activity') 18 19 20 class Output(cowrie.core.output.Output): 21 def __init__(self): 22 self.user = CONFIG.get('output_csirtg', 'username') or USERNAME 23 self.feed = CONFIG.get('output_csirtg', 'feed') or FEED 24 self.token = CONFIG.get('output_csirtg', 'token') or TOKEN 25 try: 26 self.description = CONFIG.get('output_csirtg', 'description') 27 except Exception: 28 self.description = DESCRIPTION 29 self.context = {} 30 self.client = Client(token=self.token) 31 cowrie.core.output.Output.__init__(self) 32 33 def start(self, ): 34 pass 35 36 def stop(self): 37 pass 38 39 def write(self, e): 40 peerIP = e['src_ip'] 41 ts = e['timestamp'] 42 system = e['system'] 43 44 if system not in ['cowrie.ssh.factory.CowrieSSHFactory', 'cowrie.telnet.transport.HoneyPotTelnetFactory']: 45 return 46 47 today = str(datetime.now().date()) 48 49 if not self.context.get(today): 50 self.context = {} 51 self.context[today] = set() 52 53 key = ','.join([peerIP, system]) 54 55 if key in self.context[today]: 56 return 57 58 self.context[today].add(key) 59 60 tags = 'scanner,ssh' 61 port = 22 62 if e['system'] == 'cowrie.telnet.transport.HoneyPotTelnetFactory': 63 tags = 'scanner,telnet' 64 port = 23 65 66 i = { 67 'user': self.user, 68 'feed': self.feed, 69 'indicator': peerIP, 70 'portlist': port, 71 'protocol': 'tcp', 72 'tags': tags, 73 'firsttime': ts, 74 'lasttime': ts, 75 'description': self.description 76 } 77 78 ret = Indicator(self.client, i).submit() 79 log.msg('logged to csirtg %s ' % ret['location']) 80 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/cowrie/output/csirtg.py b/src/cowrie/output/csirtg.py --- a/src/cowrie/output/csirtg.py +++ b/src/cowrie/output/csirtg.py @@ -39,9 +39,10 @@ def write(self, e): peerIP = e['src_ip'] ts = e['timestamp'] - system = e['system'] + system = e.get('system', None) - if system not in ['cowrie.ssh.factory.CowrieSSHFactory', 'cowrie.telnet.transport.HoneyPotTelnetFactory']: + if system not in ['cowrie.ssh.factory.CowrieSSHFactory', + 'cowrie.telnet.transport.HoneyPotTelnetFactory']: return today = str(datetime.now().date())
{"golden_diff": "diff --git a/src/cowrie/output/csirtg.py b/src/cowrie/output/csirtg.py\n--- a/src/cowrie/output/csirtg.py\n+++ b/src/cowrie/output/csirtg.py\n@@ -39,9 +39,10 @@\n def write(self, e):\n peerIP = e['src_ip']\n ts = e['timestamp']\n- system = e['system']\n+ system = e.get('system', None)\n \n- if system not in ['cowrie.ssh.factory.CowrieSSHFactory', 'cowrie.telnet.transport.HoneyPotTelnetFactory']:\n+ if system not in ['cowrie.ssh.factory.CowrieSSHFactory',\n+ 'cowrie.telnet.transport.HoneyPotTelnetFactory']:\n return\n \n today = str(datetime.now().date())\n", "issue": "Bug in csirtg plugin\n@wesyoung Not sure when this bug started, but just looked today at my honeypots and saw this happening all over the place in the logs.\r\n\r\n```\r\n2018-02-11T16:53:14-0500 [twisted.internet.defer#critical] Unhandled error in Deferred:\r\n2018-02-11T16:53:14-0500 [twisted.internet.defer#critical]\r\n\tTraceback (most recent call last):\r\n\t File \"/usr/local/lib/python2.7/dist-packages/twisted/internet/tcp.py\", line 289, in connectionLost\r\n\t protocol.connectionLost(reason)\r\n\t File \"/usr/local/lib/python2.7/dist-packages/twisted/web/client.py\", line 223, in connectionLost\r\n\t self.factory._disconnectedDeferred.callback(None)\r\n\t File \"/usr/local/lib/python2.7/dist-packages/twisted/internet/defer.py\", line 459, in callback\r\n\t self._startRunCallbacks(result)\r\n\t File \"/usr/local/lib/python2.7/dist-packages/twisted/internet/defer.py\", line 567, in _startRunCallbacks\r\n\t self._runCallbacks()\r\n\t--- <exception caught here> ---\r\n\t File \"/usr/local/lib/python2.7/dist-packages/twisted/internet/defer.py\", line 653, in _runCallbacks\r\n\t current.result = callback(current.result, *args, **kw)\r\n\t File \"/home/cowrie/cowrie/cowrie/commands/wget.py\", line 241, in error\r\n\t url=self.url)\r\n\t File \"/home/cowrie/cowrie/cowrie/shell/protocol.py\", line 80, in logDispatch\r\n\t pt.factory.logDispatch(*msg, **args)\r\n\t File \"/home/cowrie/cowrie/cowrie/telnet/transport.py\", line 43, in logDispatch\r\n\t output.logDispatch(*msg, **args)\r\n\t File \"/home/cowrie/cowrie/cowrie/core/output.py\", line 117, in logDispatch\r\n\t self.emit(ev)\r\n\t File \"/home/cowrie/cowrie/cowrie/core/output.py\", line 206, in emit\r\n\t self.write(ev)\r\n\t File \"/home/cowrie/cowrie/cowrie/output/csirtg.py\", line 43, in write\r\n\t system = e['system']\r\n\texceptions.KeyError: 'system'\r\n```\n", "before_files": [{"content": "from __future__ import absolute_import, division\n\nimport os\nfrom datetime import datetime\n\nfrom csirtgsdk.client import Client\nfrom csirtgsdk.indicator import Indicator\n\nfrom twisted.python import log\n\nimport cowrie.core.output\nfrom cowrie.core.config import CONFIG\n\nUSERNAME = os.environ.get('CSIRTG_USER')\nFEED = os.environ.get('CSIRTG_FEED')\nTOKEN = os.environ.get('CSIRG_TOKEN')\nDESCRIPTION = os.environ.get('CSIRTG_DESCRIPTION', 'random scanning activity')\n\n\nclass Output(cowrie.core.output.Output):\n def __init__(self):\n self.user = CONFIG.get('output_csirtg', 'username') or USERNAME\n self.feed = CONFIG.get('output_csirtg', 'feed') or FEED\n self.token = CONFIG.get('output_csirtg', 'token') or TOKEN\n try:\n self.description = CONFIG.get('output_csirtg', 'description')\n except Exception:\n self.description = DESCRIPTION\n self.context = {}\n self.client = Client(token=self.token)\n cowrie.core.output.Output.__init__(self)\n\n def start(self, ):\n pass\n\n def stop(self):\n pass\n\n def write(self, e):\n peerIP = e['src_ip']\n ts = e['timestamp']\n system = e['system']\n\n if system not in ['cowrie.ssh.factory.CowrieSSHFactory', 'cowrie.telnet.transport.HoneyPotTelnetFactory']:\n return\n\n today = str(datetime.now().date())\n\n if not self.context.get(today):\n self.context = {}\n self.context[today] = set()\n\n key = ','.join([peerIP, system])\n\n if key in self.context[today]:\n return\n\n self.context[today].add(key)\n\n tags = 'scanner,ssh'\n port = 22\n if e['system'] == 'cowrie.telnet.transport.HoneyPotTelnetFactory':\n tags = 'scanner,telnet'\n port = 23\n\n i = {\n 'user': self.user,\n 'feed': self.feed,\n 'indicator': peerIP,\n 'portlist': port,\n 'protocol': 'tcp',\n 'tags': tags,\n 'firsttime': ts,\n 'lasttime': ts,\n 'description': self.description\n }\n\n ret = Indicator(self.client, i).submit()\n log.msg('logged to csirtg %s ' % ret['location'])\n", "path": "src/cowrie/output/csirtg.py"}], "after_files": [{"content": "from __future__ import absolute_import, division\n\nimport os\nfrom datetime import datetime\n\nfrom csirtgsdk.client import Client\nfrom csirtgsdk.indicator import Indicator\n\nfrom twisted.python import log\n\nimport cowrie.core.output\nfrom cowrie.core.config import CONFIG\n\nUSERNAME = os.environ.get('CSIRTG_USER')\nFEED = os.environ.get('CSIRTG_FEED')\nTOKEN = os.environ.get('CSIRG_TOKEN')\nDESCRIPTION = os.environ.get('CSIRTG_DESCRIPTION', 'random scanning activity')\n\n\nclass Output(cowrie.core.output.Output):\n def __init__(self):\n self.user = CONFIG.get('output_csirtg', 'username') or USERNAME\n self.feed = CONFIG.get('output_csirtg', 'feed') or FEED\n self.token = CONFIG.get('output_csirtg', 'token') or TOKEN\n try:\n self.description = CONFIG.get('output_csirtg', 'description')\n except Exception:\n self.description = DESCRIPTION\n self.context = {}\n self.client = Client(token=self.token)\n cowrie.core.output.Output.__init__(self)\n\n def start(self, ):\n pass\n\n def stop(self):\n pass\n\n def write(self, e):\n peerIP = e['src_ip']\n ts = e['timestamp']\n system = e.get('system', None)\n\n if system not in ['cowrie.ssh.factory.CowrieSSHFactory',\n 'cowrie.telnet.transport.HoneyPotTelnetFactory']:\n return\n\n today = str(datetime.now().date())\n\n if not self.context.get(today):\n self.context = {}\n self.context[today] = set()\n\n key = ','.join([peerIP, system])\n\n if key in self.context[today]:\n return\n\n self.context[today].add(key)\n\n tags = 'scanner,ssh'\n port = 22\n if e['system'] == 'cowrie.telnet.transport.HoneyPotTelnetFactory':\n tags = 'scanner,telnet'\n port = 23\n\n i = {\n 'user': self.user,\n 'feed': self.feed,\n 'indicator': peerIP,\n 'portlist': port,\n 'protocol': 'tcp',\n 'tags': tags,\n 'firsttime': ts,\n 'lasttime': ts,\n 'description': self.description\n }\n\n ret = Indicator(self.client, i).submit()\n log.msg('logged to csirtg %s ' % ret['location'])\n", "path": "src/cowrie/output/csirtg.py"}]}
1,499
180
gh_patches_debug_5664
rasdani/github-patches
git_diff
cisagov__manage.get.gov-278
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- DomainApplication list on logged in user homepage Outcome: A list (limited to one now) of domain applications on the homepage of a logged in user. A/C: As a user I want to see my in progress applications for a domain. As a user I want to be able to select an application that is `In Progress` and continue my progress. As a user I want to be able to view submitted applications. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/registrar/views/index.py` Content: ``` 1 from django.shortcuts import render 2 3 4 def index(request): 5 """This page is available to anyone without logging in.""" 6 return render(request, "home.html") 7 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/registrar/views/index.py b/src/registrar/views/index.py --- a/src/registrar/views/index.py +++ b/src/registrar/views/index.py @@ -1,6 +1,12 @@ from django.shortcuts import render +from registrar.models import DomainApplication + def index(request): """This page is available to anyone without logging in.""" - return render(request, "home.html") + context = {} + if request.user.is_authenticated: + applications = DomainApplication.objects.filter(creator=request.user) + context["domain_applications"] = applications + return render(request, "home.html", context)
{"golden_diff": "diff --git a/src/registrar/views/index.py b/src/registrar/views/index.py\n--- a/src/registrar/views/index.py\n+++ b/src/registrar/views/index.py\n@@ -1,6 +1,12 @@\n from django.shortcuts import render\n \n+from registrar.models import DomainApplication\n+\n \n def index(request):\n \"\"\"This page is available to anyone without logging in.\"\"\"\n- return render(request, \"home.html\")\n+ context = {}\n+ if request.user.is_authenticated:\n+ applications = DomainApplication.objects.filter(creator=request.user)\n+ context[\"domain_applications\"] = applications\n+ return render(request, \"home.html\", context)\n", "issue": "DomainApplication list on logged in user homepage\nOutcome: A list (limited to one now) of domain applications on the homepage of a logged in user. \n\nA/C: As a user I want to see my in progress applications for a domain. As a user I want to be able to select an application that is `In Progress` and continue my progress. As a user I want to be able to view submitted applications. \n", "before_files": [{"content": "from django.shortcuts import render\n\n\ndef index(request):\n \"\"\"This page is available to anyone without logging in.\"\"\"\n return render(request, \"home.html\")\n", "path": "src/registrar/views/index.py"}], "after_files": [{"content": "from django.shortcuts import render\n\nfrom registrar.models import DomainApplication\n\n\ndef index(request):\n \"\"\"This page is available to anyone without logging in.\"\"\"\n context = {}\n if request.user.is_authenticated:\n applications = DomainApplication.objects.filter(creator=request.user)\n context[\"domain_applications\"] = applications\n return render(request, \"home.html\", context)\n", "path": "src/registrar/views/index.py"}]}
387
138
gh_patches_debug_57620
rasdani/github-patches
git_diff
SigmaHQ__sigma-2144
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- sigmac - invalid value in STATE filter In specification it states that there are three statuses: * stable * test * experimental https://github.com/SigmaHQ/sigma/wiki/Specification#status-optional Problem arises when using sigmac with filter option and trying to filter out status **test**, because in filter options it states that status is one of these values: experimental, testing, stable. Specification says **test**, but filter looks for status **testing**. ``` --filter FILTER, -f FILTER Define comma-separated filters that must match (AND- linked) to rule to be processed. Valid filters: level<=x, level>=x, level=x, status=y, logsource=z, tag=t. x is one of: low, medium, high, critical. y is one of: experimental, testing, stable. z is a word appearing in an arbitrary log source attribute. t is a tag that must appear in the rules tag list, case- insensitive matching. Multiple log source specifications are AND linked. ``` sigma-master\tools\sigma\filter.py `line 27: STATES = ["experimental", "testing", "stable"] ` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tools/sigma/filter.py` Content: ``` 1 # Sigma parser 2 # Copyright 2016-2018 Thomas Patzke, Florian Roth 3 4 # This program is free software: you can redistribute it and/or modify 5 # it under the terms of the GNU Lesser General Public License as published by 6 # the Free Software Foundation, either version 3 of the License, or 7 # (at your option) any later version. 8 9 # This program is distributed in the hope that it will be useful, 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 # GNU Lesser General Public License for more details. 13 14 # You should have received a copy of the GNU Lesser General Public License 15 # along with this program. If not, see <http://www.gnu.org/licenses/>. 16 17 # Rule Filtering 18 import datetime 19 class SigmaRuleFilter: 20 """Filter for Sigma rules with conditions""" 21 LEVELS = { 22 "low" : 0, 23 "medium" : 1, 24 "high" : 2, 25 "critical" : 3 26 } 27 STATES = ["experimental", "testing", "stable"] 28 29 def __init__(self, expr): 30 self.minlevel = None 31 self.maxlevel = None 32 self.status = None 33 self.tlp = None 34 self.target = None 35 self.logsources = list() 36 self.notlogsources = list() 37 self.tags = list() 38 self.nottags = list() 39 self.inlastday = None 40 self.condition = list() 41 self.notcondition = list() 42 43 for cond in [c.replace(" ", "") for c in expr.split(",")]: 44 if cond.startswith("level<="): 45 try: 46 level = cond[cond.index("=") + 1:] 47 self.maxlevel = self.LEVELS[level] 48 except KeyError as e: 49 raise SigmaRuleFilterParseException("Unknown level '%s' in condition '%s'" % (level, cond)) from e 50 elif cond.startswith("level>="): 51 try: 52 level = cond[cond.index("=") + 1:] 53 self.minlevel = self.LEVELS[level] 54 except KeyError as e: 55 raise SigmaRuleFilterParseException("Unknown level '%s' in condition '%s'" % (level, cond)) from e 56 elif cond.startswith("level="): 57 try: 58 level = cond[cond.index("=") + 1:] 59 self.minlevel = self.LEVELS[level] 60 self.maxlevel = self.minlevel 61 except KeyError as e: 62 raise SigmaRuleFilterParseException("Unknown level '%s' in condition '%s'" % (level, cond)) from e 63 elif cond.startswith("status="): 64 self.status = cond[cond.index("=") + 1:] 65 if self.status not in self.STATES: 66 raise SigmaRuleFilterParseException("Unknown status '%s' in condition '%s'" % (self.status, cond)) 67 elif cond.startswith("tlp="): 68 self.tlp = cond[cond.index("=") + 1:].upper() #tlp is always uppercase 69 elif cond.startswith("target="): 70 self.target = cond[cond.index("=") + 1:].lower() # lower to make caseinsensitive 71 elif cond.startswith("logsource="): 72 self.logsources.append(cond[cond.index("=") + 1:]) 73 elif cond.startswith("logsource!="): 74 self.notlogsources.append(cond[cond.index("=") + 1:]) 75 elif cond.startswith("tag="): 76 self.tags.append(cond[cond.index("=") + 1:].lower()) 77 elif cond.startswith("tag!="): 78 self.nottags.append(cond[cond.index("=") + 1:].lower()) 79 elif cond.startswith("condition="): 80 self.condition.append(cond[cond.index("=") + 1:].lower()) 81 elif cond.startswith("condition!="): 82 self.notcondition.append(cond[cond.index("=") + 1:].lower()) 83 elif cond.startswith("inlastday="): 84 nbday = cond[cond.index("=") + 1:] 85 try: 86 self.inlastday = int(nbday) 87 except ValueError as e: 88 raise SigmaRuleFilterParseException("Unknown number '%s' in condition '%s'" % (nbday, cond)) from e 89 else: 90 raise SigmaRuleFilterParseException("Unknown condition '%s'" % cond) 91 92 def match(self, yamldoc): 93 """Match filter conditions against rule""" 94 # Levels 95 if self.minlevel is not None or self.maxlevel is not None: 96 try: 97 level = self.LEVELS[yamldoc['level']] 98 except KeyError: # missing or invalid level 99 return False # User wants level restriction, but it's not possible here 100 101 # Minimum level 102 if self.minlevel is not None: 103 if level < self.minlevel: 104 return False 105 # Maximum level 106 if self.maxlevel is not None: 107 if level > self.maxlevel: 108 return False 109 110 # Status 111 if self.status is not None: 112 try: 113 status = yamldoc['status'] 114 except KeyError: # missing status 115 return False # User wants status restriction, but it's not possible here 116 if status != self.status: 117 return False 118 119 # Tlp 120 if self.tlp is not None: 121 try: 122 tlp = yamldoc['tlp'] 123 except KeyError: # missing tlp 124 tlp = "WHITE" # tlp is WHITE by default 125 if tlp != self.tlp: 126 return False 127 128 #Target 129 if self.target: 130 try: 131 targets = [ target.lower() for target in yamldoc['target']] 132 except (KeyError, AttributeError): # no target set 133 return False 134 if self.target not in targets: 135 return False 136 137 # Log Sources 138 if self.logsources: 139 try: 140 logsources = { value for key, value in yamldoc['logsource'].items() } 141 except (KeyError, AttributeError): # no log source set 142 return False # User wants status restriction, but it's not possible here 143 144 for logsrc in self.logsources: 145 if logsrc not in logsources: 146 return False 147 148 # NOT Log Sources 149 if self.notlogsources: 150 try: 151 notlogsources = { value for key, value in yamldoc['logsource'].items() } 152 except (KeyError, AttributeError): # no log source set 153 return False # User wants status restriction, but it's not possible here 154 155 for logsrc in self.notlogsources: 156 if logsrc in notlogsources: 157 return False 158 159 # Tags 160 if self.tags: 161 try: 162 tags = [ tag.lower() for tag in yamldoc['tags']] 163 except (KeyError, AttributeError): # no tags set 164 return False 165 166 for tag in self.tags: 167 if tag not in tags: 168 return False 169 # NOT Tags 170 if self.nottags: 171 try: 172 nottags = [ tag.lower() for tag in yamldoc['tags']] 173 except (KeyError, AttributeError): # no tags set 174 return False 175 176 for tag in self.nottags: 177 if tag in nottags: 178 return False 179 180 # date in the last N days 181 if self.inlastday: 182 try: 183 date_str = yamldoc['date'] 184 except KeyError: # missing date 185 return False # User wants date time restriction, but it's not possible here 186 187 try: 188 modified_str = yamldoc['modified'] 189 except KeyError: # no update 190 modified_str = None 191 if modified_str: 192 date_str = modified_str 193 194 date_object = datetime.datetime.strptime(date_str, '%Y/%m/%d') 195 today_objet = datetime.datetime.now() 196 delta = today_objet - date_object 197 if delta.days > self.inlastday: 198 return False 199 200 if self.condition: 201 try: 202 conditions = yamldoc['detection']['condition'] 203 if isinstance(conditions,list): # sone time conditions are list even with only 1 line 204 s_condition = ' '.join(conditions) 205 else: 206 s_condition = conditions 207 except KeyError: # missing condition 208 return False # User wants condition restriction, but it's not possible here 209 for val in self.condition: 210 if not val in s_condition: 211 return False 212 213 if self.notcondition: 214 try: 215 conditions = yamldoc['detection']['condition'] 216 if isinstance(conditions,list): # sone time conditions are list even with only 1 line 217 s_condition = ' '.join(conditions) 218 else: 219 s_condition = conditions 220 except KeyError: # missing condition 221 return False # User wants condition restriction, but it's not possible here 222 for val in self.notcondition: 223 if val in s_condition: 224 return False 225 226 # all tests passed 227 return True 228 229 class SigmaRuleFilterParseException(Exception): 230 pass 231 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tools/sigma/filter.py b/tools/sigma/filter.py --- a/tools/sigma/filter.py +++ b/tools/sigma/filter.py @@ -24,7 +24,9 @@ "high" : 2, "critical" : 3 } - STATES = ["experimental", "testing", "stable"] + STATES = ["experimental", + "test", + "stable"] def __init__(self, expr): self.minlevel = None
{"golden_diff": "diff --git a/tools/sigma/filter.py b/tools/sigma/filter.py\n--- a/tools/sigma/filter.py\n+++ b/tools/sigma/filter.py\n@@ -24,7 +24,9 @@\n \"high\" : 2,\n \"critical\" : 3\n }\n- STATES = [\"experimental\", \"testing\", \"stable\"]\n+ STATES = [\"experimental\",\n+ \"test\",\n+ \"stable\"]\n \n def __init__(self, expr):\n self.minlevel = None\n", "issue": "sigmac - invalid value in STATE filter\nIn specification it states that there are three statuses:\r\n* stable\r\n* test\r\n* experimental \r\nhttps://github.com/SigmaHQ/sigma/wiki/Specification#status-optional\r\n\r\nProblem arises when using sigmac with filter option and trying to filter out status **test**, because in filter options it states that status is one of these values: experimental, testing, stable.\r\nSpecification says **test**, but filter looks for status **testing**.\r\n```\r\n--filter FILTER, -f FILTER\r\n Define comma-separated filters that must match (AND-\r\n linked) to rule to be processed. Valid filters:\r\n level<=x, level>=x, level=x, status=y, logsource=z,\r\n tag=t. x is one of: low, medium, high, critical. y is\r\n one of: experimental, testing, stable. z is a word\r\n appearing in an arbitrary log source attribute. t is a\r\n tag that must appear in the rules tag list, case-\r\n insensitive matching. Multiple log source\r\n specifications are AND linked.\r\n \r\n\r\n```\r\n\r\nsigma-master\\tools\\sigma\\filter.py\r\n`line 27: STATES = [\"experimental\", \"testing\", \"stable\"] `\n", "before_files": [{"content": "# Sigma parser\n# Copyright 2016-2018 Thomas Patzke, Florian Roth\n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\n# Rule Filtering\nimport datetime\nclass SigmaRuleFilter:\n \"\"\"Filter for Sigma rules with conditions\"\"\"\n LEVELS = {\n \"low\" : 0,\n \"medium\" : 1,\n \"high\" : 2,\n \"critical\" : 3\n }\n STATES = [\"experimental\", \"testing\", \"stable\"]\n\n def __init__(self, expr):\n self.minlevel = None\n self.maxlevel = None\n self.status = None\n self.tlp = None\n self.target = None\n self.logsources = list()\n self.notlogsources = list()\n self.tags = list()\n self.nottags = list()\n self.inlastday = None\n self.condition = list()\n self.notcondition = list()\n\n for cond in [c.replace(\" \", \"\") for c in expr.split(\",\")]:\n if cond.startswith(\"level<=\"):\n try:\n level = cond[cond.index(\"=\") + 1:]\n self.maxlevel = self.LEVELS[level]\n except KeyError as e:\n raise SigmaRuleFilterParseException(\"Unknown level '%s' in condition '%s'\" % (level, cond)) from e\n elif cond.startswith(\"level>=\"):\n try:\n level = cond[cond.index(\"=\") + 1:]\n self.minlevel = self.LEVELS[level]\n except KeyError as e:\n raise SigmaRuleFilterParseException(\"Unknown level '%s' in condition '%s'\" % (level, cond)) from e\n elif cond.startswith(\"level=\"):\n try:\n level = cond[cond.index(\"=\") + 1:]\n self.minlevel = self.LEVELS[level]\n self.maxlevel = self.minlevel\n except KeyError as e:\n raise SigmaRuleFilterParseException(\"Unknown level '%s' in condition '%s'\" % (level, cond)) from e\n elif cond.startswith(\"status=\"):\n self.status = cond[cond.index(\"=\") + 1:]\n if self.status not in self.STATES:\n raise SigmaRuleFilterParseException(\"Unknown status '%s' in condition '%s'\" % (self.status, cond))\n elif cond.startswith(\"tlp=\"):\n self.tlp = cond[cond.index(\"=\") + 1:].upper() #tlp is always uppercase\n elif cond.startswith(\"target=\"):\n self.target = cond[cond.index(\"=\") + 1:].lower() # lower to make caseinsensitive\n elif cond.startswith(\"logsource=\"):\n self.logsources.append(cond[cond.index(\"=\") + 1:])\n elif cond.startswith(\"logsource!=\"):\n self.notlogsources.append(cond[cond.index(\"=\") + 1:])\n elif cond.startswith(\"tag=\"):\n self.tags.append(cond[cond.index(\"=\") + 1:].lower())\n elif cond.startswith(\"tag!=\"):\n self.nottags.append(cond[cond.index(\"=\") + 1:].lower())\n elif cond.startswith(\"condition=\"):\n self.condition.append(cond[cond.index(\"=\") + 1:].lower())\n elif cond.startswith(\"condition!=\"):\n self.notcondition.append(cond[cond.index(\"=\") + 1:].lower())\n elif cond.startswith(\"inlastday=\"):\n nbday = cond[cond.index(\"=\") + 1:]\n try:\n self.inlastday = int(nbday)\n except ValueError as e:\n raise SigmaRuleFilterParseException(\"Unknown number '%s' in condition '%s'\" % (nbday, cond)) from e\n else:\n raise SigmaRuleFilterParseException(\"Unknown condition '%s'\" % cond)\n\n def match(self, yamldoc):\n \"\"\"Match filter conditions against rule\"\"\"\n # Levels\n if self.minlevel is not None or self.maxlevel is not None:\n try:\n level = self.LEVELS[yamldoc['level']]\n except KeyError: # missing or invalid level\n return False # User wants level restriction, but it's not possible here\n\n # Minimum level\n if self.minlevel is not None:\n if level < self.minlevel:\n return False\n # Maximum level\n if self.maxlevel is not None:\n if level > self.maxlevel:\n return False\n\n # Status\n if self.status is not None:\n try:\n status = yamldoc['status']\n except KeyError: # missing status\n return False # User wants status restriction, but it's not possible here\n if status != self.status:\n return False\n\n # Tlp\n if self.tlp is not None:\n try:\n tlp = yamldoc['tlp']\n except KeyError: # missing tlp\n tlp = \"WHITE\" # tlp is WHITE by default\n if tlp != self.tlp:\n return False\n\n #Target\n if self.target:\n try:\n targets = [ target.lower() for target in yamldoc['target']]\n except (KeyError, AttributeError): # no target set\n return False\n if self.target not in targets:\n return False\n\n # Log Sources\n if self.logsources:\n try:\n logsources = { value for key, value in yamldoc['logsource'].items() }\n except (KeyError, AttributeError): # no log source set\n return False # User wants status restriction, but it's not possible here\n\n for logsrc in self.logsources:\n if logsrc not in logsources:\n return False\n\n # NOT Log Sources\n if self.notlogsources:\n try:\n notlogsources = { value for key, value in yamldoc['logsource'].items() }\n except (KeyError, AttributeError): # no log source set\n return False # User wants status restriction, but it's not possible here\n\n for logsrc in self.notlogsources:\n if logsrc in notlogsources:\n return False\n\n # Tags\n if self.tags:\n try:\n tags = [ tag.lower() for tag in yamldoc['tags']]\n except (KeyError, AttributeError): # no tags set\n return False\n\n for tag in self.tags:\n if tag not in tags:\n return False\n # NOT Tags\n if self.nottags:\n try:\n nottags = [ tag.lower() for tag in yamldoc['tags']]\n except (KeyError, AttributeError): # no tags set\n return False\n\n for tag in self.nottags:\n if tag in nottags:\n return False\n\n # date in the last N days\n if self.inlastday:\n try:\n date_str = yamldoc['date']\n except KeyError: # missing date\n return False # User wants date time restriction, but it's not possible here\n\n try:\n modified_str = yamldoc['modified']\n except KeyError: # no update\n modified_str = None\n if modified_str:\n date_str = modified_str\n\n date_object = datetime.datetime.strptime(date_str, '%Y/%m/%d')\n today_objet = datetime.datetime.now()\n delta = today_objet - date_object\n if delta.days > self.inlastday:\n return False\n\n if self.condition:\n try:\n conditions = yamldoc['detection']['condition']\n if isinstance(conditions,list): # sone time conditions are list even with only 1 line\n s_condition = ' '.join(conditions)\n else:\n s_condition = conditions\n except KeyError: # missing condition\n return False # User wants condition restriction, but it's not possible here\n for val in self.condition:\n if not val in s_condition:\n return False\n\n if self.notcondition:\n try:\n conditions = yamldoc['detection']['condition']\n if isinstance(conditions,list): # sone time conditions are list even with only 1 line\n s_condition = ' '.join(conditions)\n else:\n s_condition = conditions\n except KeyError: # missing condition\n return False # User wants condition restriction, but it's not possible here\n for val in self.notcondition:\n if val in s_condition:\n return False\n\n # all tests passed\n return True\n\nclass SigmaRuleFilterParseException(Exception):\n pass\n", "path": "tools/sigma/filter.py"}], "after_files": [{"content": "# Sigma parser\n# Copyright 2016-2018 Thomas Patzke, Florian Roth\n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\n# Rule Filtering\nimport datetime\nclass SigmaRuleFilter:\n \"\"\"Filter for Sigma rules with conditions\"\"\"\n LEVELS = {\n \"low\" : 0,\n \"medium\" : 1,\n \"high\" : 2,\n \"critical\" : 3\n }\n STATES = [\"experimental\",\n \"test\",\n \"stable\"]\n\n def __init__(self, expr):\n self.minlevel = None\n self.maxlevel = None\n self.status = None\n self.tlp = None\n self.target = None\n self.logsources = list()\n self.notlogsources = list()\n self.tags = list()\n self.nottags = list()\n self.inlastday = None\n self.condition = list()\n self.notcondition = list()\n\n for cond in [c.replace(\" \", \"\") for c in expr.split(\",\")]:\n if cond.startswith(\"level<=\"):\n try:\n level = cond[cond.index(\"=\") + 1:]\n self.maxlevel = self.LEVELS[level]\n except KeyError as e:\n raise SigmaRuleFilterParseException(\"Unknown level '%s' in condition '%s'\" % (level, cond)) from e\n elif cond.startswith(\"level>=\"):\n try:\n level = cond[cond.index(\"=\") + 1:]\n self.minlevel = self.LEVELS[level]\n except KeyError as e:\n raise SigmaRuleFilterParseException(\"Unknown level '%s' in condition '%s'\" % (level, cond)) from e\n elif cond.startswith(\"level=\"):\n try:\n level = cond[cond.index(\"=\") + 1:]\n self.minlevel = self.LEVELS[level]\n self.maxlevel = self.minlevel\n except KeyError as e:\n raise SigmaRuleFilterParseException(\"Unknown level '%s' in condition '%s'\" % (level, cond)) from e\n elif cond.startswith(\"status=\"):\n self.status = cond[cond.index(\"=\") + 1:]\n if self.status not in self.STATES:\n raise SigmaRuleFilterParseException(\"Unknown status '%s' in condition '%s'\" % (self.status, cond))\n elif cond.startswith(\"tlp=\"):\n self.tlp = cond[cond.index(\"=\") + 1:].upper() #tlp is always uppercase\n elif cond.startswith(\"target=\"):\n self.target = cond[cond.index(\"=\") + 1:].lower() # lower to make caseinsensitive\n elif cond.startswith(\"logsource=\"):\n self.logsources.append(cond[cond.index(\"=\") + 1:])\n elif cond.startswith(\"logsource!=\"):\n self.notlogsources.append(cond[cond.index(\"=\") + 1:])\n elif cond.startswith(\"tag=\"):\n self.tags.append(cond[cond.index(\"=\") + 1:].lower())\n elif cond.startswith(\"tag!=\"):\n self.nottags.append(cond[cond.index(\"=\") + 1:].lower())\n elif cond.startswith(\"condition=\"):\n self.condition.append(cond[cond.index(\"=\") + 1:].lower())\n elif cond.startswith(\"condition!=\"):\n self.notcondition.append(cond[cond.index(\"=\") + 1:].lower())\n elif cond.startswith(\"inlastday=\"):\n nbday = cond[cond.index(\"=\") + 1:]\n try:\n self.inlastday = int(nbday)\n except ValueError as e:\n raise SigmaRuleFilterParseException(\"Unknown number '%s' in condition '%s'\" % (nbday, cond)) from e\n else:\n raise SigmaRuleFilterParseException(\"Unknown condition '%s'\" % cond)\n\n def match(self, yamldoc):\n \"\"\"Match filter conditions against rule\"\"\"\n # Levels\n if self.minlevel is not None or self.maxlevel is not None:\n try:\n level = self.LEVELS[yamldoc['level']]\n except KeyError: # missing or invalid level\n return False # User wants level restriction, but it's not possible here\n\n # Minimum level\n if self.minlevel is not None:\n if level < self.minlevel:\n return False\n # Maximum level\n if self.maxlevel is not None:\n if level > self.maxlevel:\n return False\n\n # Status\n if self.status is not None:\n try:\n status = yamldoc['status']\n except KeyError: # missing status\n return False # User wants status restriction, but it's not possible here\n if status != self.status:\n return False\n\n # Tlp\n if self.tlp is not None:\n try:\n tlp = yamldoc['tlp']\n except KeyError: # missing tlp\n tlp = \"WHITE\" # tlp is WHITE by default\n if tlp != self.tlp:\n return False\n\n #Target\n if self.target:\n try:\n targets = [ target.lower() for target in yamldoc['target']]\n except (KeyError, AttributeError): # no target set\n return False\n if self.target not in targets:\n return False\n\n # Log Sources\n if self.logsources:\n try:\n logsources = { value for key, value in yamldoc['logsource'].items() }\n except (KeyError, AttributeError): # no log source set\n return False # User wants status restriction, but it's not possible here\n\n for logsrc in self.logsources:\n if logsrc not in logsources:\n return False\n\n # NOT Log Sources\n if self.notlogsources:\n try:\n notlogsources = { value for key, value in yamldoc['logsource'].items() }\n except (KeyError, AttributeError): # no log source set\n return False # User wants status restriction, but it's not possible here\n\n for logsrc in self.notlogsources:\n if logsrc in notlogsources:\n return False\n\n # Tags\n if self.tags:\n try:\n tags = [ tag.lower() for tag in yamldoc['tags']]\n except (KeyError, AttributeError): # no tags set\n return False\n\n for tag in self.tags:\n if tag not in tags:\n return False\n # NOT Tags\n if self.nottags:\n try:\n nottags = [ tag.lower() for tag in yamldoc['tags']]\n except (KeyError, AttributeError): # no tags set\n return False\n\n for tag in self.nottags:\n if tag in nottags:\n return False\n\n # date in the last N days\n if self.inlastday:\n try:\n date_str = yamldoc['date']\n except KeyError: # missing date\n return False # User wants date time restriction, but it's not possible here\n\n try:\n modified_str = yamldoc['modified']\n except KeyError: # no update\n modified_str = None\n if modified_str:\n date_str = modified_str\n\n date_object = datetime.datetime.strptime(date_str, '%Y/%m/%d')\n today_objet = datetime.datetime.now()\n delta = today_objet - date_object\n if delta.days > self.inlastday:\n return False\n\n if self.condition:\n try:\n conditions = yamldoc['detection']['condition']\n if isinstance(conditions,list): # sone time conditions are list even with only 1 line\n s_condition = ' '.join(conditions)\n else:\n s_condition = conditions\n except KeyError: # missing condition\n return False # User wants condition restriction, but it's not possible here\n for val in self.condition:\n if not val in s_condition:\n return False\n\n if self.notcondition:\n try:\n conditions = yamldoc['detection']['condition']\n if isinstance(conditions,list): # sone time conditions are list even with only 1 line\n s_condition = ' '.join(conditions)\n else:\n s_condition = conditions\n except KeyError: # missing condition\n return False # User wants condition restriction, but it's not possible here\n for val in self.notcondition:\n if val in s_condition:\n return False\n\n # all tests passed\n return True\n\nclass SigmaRuleFilterParseException(Exception):\n pass\n", "path": "tools/sigma/filter.py"}]}
3,088
111
gh_patches_debug_14496
rasdani/github-patches
git_diff
inventree__InvenTree-1284
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Restore backup I have found several mentions of doing a backup in the documentation but no mention of how to perform the restore. In my test instance I've configured "backup_dir" in config.yaml and performed a backup with "inv backup". Could not find a "restore" method defined in tasks.py Is there something I'm missing? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tasks.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 from invoke import task 4 from shutil import copyfile 5 6 import random 7 import string 8 import os 9 import sys 10 11 def apps(): 12 """ 13 Returns a list of installed apps 14 """ 15 16 return [ 17 'barcode', 18 'build', 19 'common', 20 'company', 21 'label', 22 'order', 23 'part', 24 'report', 25 'stock', 26 'InvenTree', 27 'users', 28 ] 29 30 def localDir(): 31 """ 32 Returns the directory of *THIS* file. 33 Used to ensure that the various scripts always run 34 in the correct directory. 35 """ 36 return os.path.dirname(os.path.abspath(__file__)) 37 38 def managePyDir(): 39 """ 40 Returns the directory of the manage.py file 41 """ 42 43 return os.path.join(localDir(), 'InvenTree') 44 45 def managePyPath(): 46 """ 47 Return the path of the manage.py file 48 """ 49 50 return os.path.join(managePyDir(), 'manage.py') 51 52 def manage(c, cmd, pty=False): 53 """ 54 Runs a given command against django's "manage.py" script. 55 56 Args: 57 c - Command line context 58 cmd - django command to run 59 """ 60 61 c.run('cd {path} && python3 manage.py {cmd}'.format( 62 path=managePyDir(), 63 cmd=cmd 64 ), pty=pty) 65 66 @task(help={'length': 'Length of secret key (default=50)'}) 67 def key(c, length=50, force=False): 68 """ 69 Generates a SECRET_KEY file which InvenTree uses for generating security hashes 70 """ 71 72 SECRET_KEY_FILE = os.path.join(localDir(), 'InvenTree', 'secret_key.txt') 73 74 # If a SECRET_KEY file does not exist, generate a new one! 75 if force or not os.path.exists(SECRET_KEY_FILE): 76 print("Generating SECRET_KEY file - " + SECRET_KEY_FILE) 77 with open(SECRET_KEY_FILE, 'w') as key_file: 78 options = string.digits + string.ascii_letters + string.punctuation 79 80 key = ''.join([random.choice(options) for i in range(length)]) 81 82 key_file.write(key) 83 84 else: 85 print("SECRET_KEY file already exists - skipping") 86 87 88 @task(post=[key]) 89 def install(c): 90 """ 91 Installs required python packages, and runs initial setup functions. 92 """ 93 94 # Install required Python packages with PIP 95 c.run('pip3 install -U -r requirements.txt') 96 97 # If a config.yaml file does not exist, copy from the template! 98 CONFIG_FILE = os.path.join(localDir(), 'InvenTree', 'config.yaml') 99 CONFIG_TEMPLATE_FILE = os.path.join(localDir(), 'InvenTree', 'config_template.yaml') 100 101 if not os.path.exists(CONFIG_FILE): 102 print("Config file 'config.yaml' does not exist - copying from template.") 103 copyfile(CONFIG_TEMPLATE_FILE, CONFIG_FILE) 104 105 106 @task 107 def shell(c): 108 """ 109 Open a python shell with access to the InvenTree database models. 110 """ 111 112 manage(c, 'shell', pty=True) 113 114 115 @task 116 def superuser(c): 117 """ 118 Create a superuser (admin) account for the database. 119 """ 120 121 manage(c, 'createsuperuser', pty=True) 122 123 @task 124 def check(c): 125 """ 126 Check validity of django codebase 127 """ 128 129 manage(c, "check") 130 131 @task 132 def migrate(c): 133 """ 134 Performs database migrations. 135 This is a critical step if the database schema have been altered! 136 """ 137 138 print("Running InvenTree database migrations...") 139 print("========================================") 140 141 manage(c, "makemigrations") 142 manage(c, "migrate") 143 manage(c, "migrate --run-syncdb") 144 manage(c, "check") 145 146 print("========================================") 147 print("InvenTree database migrations completed!") 148 149 150 @task 151 def static(c): 152 """ 153 Copies required static files to the STATIC_ROOT directory, 154 as per Django requirements. 155 """ 156 157 manage(c, "collectstatic") 158 159 160 @task(pre=[install, migrate, static]) 161 def update(c): 162 """ 163 Update InvenTree installation. 164 165 This command should be invoked after source code has been updated, 166 e.g. downloading new code from GitHub. 167 168 The following tasks are performed, in order: 169 170 - install 171 - migrate 172 - static 173 """ 174 pass 175 176 @task 177 def translate(c): 178 """ 179 Regenerate translation files. 180 181 Run this command after added new translatable strings, 182 or after adding translations for existing strings. 183 """ 184 185 # Translate applicable .py / .html / .js files 186 manage(c, "makemessages -e py -e html -e js") 187 manage(c, "compilemessages") 188 189 path = os.path.join('InvenTree', 'script', 'translation_stats.py') 190 191 c.run(f'python {path}') 192 193 @task 194 def style(c): 195 """ 196 Run PEP style checks against InvenTree sourcecode 197 """ 198 199 print("Running PEP style checks...") 200 c.run('flake8 InvenTree') 201 202 @task 203 def test(c, database=None): 204 """ 205 Run unit-tests for InvenTree codebase. 206 """ 207 # Run sanity check on the django install 208 manage(c, 'check') 209 210 # Run coverage tests 211 manage(c, 'test', pty=True) 212 213 @task 214 def coverage(c): 215 """ 216 Run code-coverage of the InvenTree codebase, 217 using the 'coverage' code-analysis tools. 218 219 Generates a code coverage report (available in the htmlcov directory) 220 """ 221 222 # Run sanity check on the django install 223 manage(c, 'check') 224 225 # Run coverage tests 226 c.run('coverage run {manage} test {apps}'.format( 227 manage=managePyPath(), 228 apps=' '.join(apps()) 229 )) 230 231 # Generate coverage report 232 c.run('coverage html') 233 234 @task 235 def mysql(c): 236 """ 237 Install packages required for using InvenTree with a MySQL database. 238 """ 239 240 print('Installing packages required for MySQL') 241 242 c.run('sudo apt-get install mysql-server libmysqlclient-dev') 243 c.run('pip3 install mysqlclient') 244 245 @task 246 def postgresql(c): 247 """ 248 Install packages required for using InvenTree with a PostgreSQL database 249 """ 250 251 print("Installing packages required for PostgreSQL") 252 253 c.run('sudo apt-get install postgresql postgresql-contrib libpq-dev') 254 c.run('pip3 install psycopg2') 255 256 @task(help={'filename': "Output filename (default = 'data.json')"}) 257 def export_records(c, filename='data.json'): 258 """ 259 Export all database records to a file 260 """ 261 262 # Get an absolute path to the file 263 if not os.path.isabs(filename): 264 filename = os.path.join(localDir(), filename) 265 filename = os.path.abspath(filename) 266 267 print(f"Exporting database records to file '{filename}'") 268 269 if os.path.exists(filename): 270 response = input("Warning: file already exists. Do you want to overwrite? [y/N]: ") 271 response = str(response).strip().lower() 272 273 if response not in ['y', 'yes']: 274 print("Cancelled export operation") 275 sys.exit(1) 276 277 cmd = f'dumpdata --exclude contenttypes --exclude auth.permission --indent 2 --output {filename}' 278 279 manage(c, cmd, pty=True) 280 281 @task(help={'filename': 'Input filename'}) 282 def import_records(c, filename='data.json'): 283 """ 284 Import database records from a file 285 """ 286 287 # Get an absolute path to the supplied filename 288 if not os.path.isabs(filename): 289 filename = os.path.join(localDir(), filename) 290 291 if not os.path.exists(filename): 292 print(f"Error: File '{filename}' does not exist") 293 sys.exit(1) 294 295 print(f"Importing database records from '{filename}'") 296 297 cmd = f'loaddata {filename}' 298 299 manage(c, cmd, pty=True) 300 301 @task 302 def import_fixtures(c): 303 """ 304 Import fixture data into the database. 305 306 This command imports all existing test fixture data into the database. 307 308 Warning: 309 - Intended for testing / development only! 310 - Running this command may overwrite existing database data!! 311 - Don't say you were not warned... 312 """ 313 314 fixtures = [ 315 # Build model 316 'build', 317 318 # Common models 319 'settings', 320 321 # Company model 322 'company', 323 'price_breaks', 324 'supplier_part', 325 326 # Order model 327 'order', 328 329 # Part model 330 'bom', 331 'category', 332 'params', 333 'part', 334 'test_templates', 335 336 # Stock model 337 'location', 338 'stock_tests', 339 'stock', 340 ] 341 342 command = 'loaddata ' + ' '.join(fixtures) 343 344 manage(c, command, pty=True) 345 346 @task 347 def backup(c): 348 """ 349 Create a backup of database models and uploaded media files. 350 351 Backup files will be written to the 'backup_dir' file specified in 'config.yaml' 352 353 """ 354 355 manage(c, 'dbbackup') 356 manage(c, 'mediabackup') 357 358 @task(help={'address': 'Server address:port (default=127.0.0.1:8000)'}) 359 def server(c, address="127.0.0.1:8000"): 360 """ 361 Launch a (deveopment) server using Django's in-built webserver. 362 363 Note: This is *not* sufficient for a production installation. 364 """ 365 366 manage(c, "runserver {address}".format(address=address), pty=True) 367 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tasks.py b/tasks.py --- a/tasks.py +++ b/tasks.py @@ -349,12 +349,22 @@ Create a backup of database models and uploaded media files. Backup files will be written to the 'backup_dir' file specified in 'config.yaml' - """ manage(c, 'dbbackup') manage(c, 'mediabackup') +@task +def restore(c): + """ + Restores database models and media files. + + Backup files are read from the 'backup_dir' file specified in 'config.yaml' + """ + + manage(c, 'dbrestore') + manage(c, 'mediarestore') + @task(help={'address': 'Server address:port (default=127.0.0.1:8000)'}) def server(c, address="127.0.0.1:8000"): """
{"golden_diff": "diff --git a/tasks.py b/tasks.py\n--- a/tasks.py\n+++ b/tasks.py\n@@ -349,12 +349,22 @@\n Create a backup of database models and uploaded media files.\n \n Backup files will be written to the 'backup_dir' file specified in 'config.yaml'\n-\n \"\"\"\n \n manage(c, 'dbbackup')\n manage(c, 'mediabackup')\n \n+@task\n+def restore(c):\n+ \"\"\"\n+ Restores database models and media files.\n+\n+ Backup files are read from the 'backup_dir' file specified in 'config.yaml'\n+ \"\"\"\n+\n+ manage(c, 'dbrestore')\n+ manage(c, 'mediarestore')\n+\n @task(help={'address': 'Server address:port (default=127.0.0.1:8000)'})\n def server(c, address=\"127.0.0.1:8000\"):\n \"\"\"\n", "issue": "Restore backup\nI have found several mentions of doing a backup in the documentation but no mention of how to perform the restore.\r\nIn my test instance I've configured \"backup_dir\" in config.yaml and performed a backup with \"inv backup\". Could not find a \"restore\" method defined in tasks.py\r\n\r\nIs there something I'm missing?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom invoke import task\nfrom shutil import copyfile\n\nimport random\nimport string\nimport os\nimport sys\n\ndef apps():\n \"\"\"\n Returns a list of installed apps\n \"\"\"\n\n return [\n 'barcode',\n 'build',\n 'common',\n 'company',\n 'label',\n 'order',\n 'part',\n 'report',\n 'stock',\n 'InvenTree',\n 'users',\n ]\n\ndef localDir():\n \"\"\"\n Returns the directory of *THIS* file.\n Used to ensure that the various scripts always run\n in the correct directory.\n \"\"\"\n return os.path.dirname(os.path.abspath(__file__))\n\ndef managePyDir():\n \"\"\"\n Returns the directory of the manage.py file\n \"\"\"\n\n return os.path.join(localDir(), 'InvenTree')\n\ndef managePyPath():\n \"\"\"\n Return the path of the manage.py file\n \"\"\"\n\n return os.path.join(managePyDir(), 'manage.py')\n\ndef manage(c, cmd, pty=False):\n \"\"\"\n Runs a given command against django's \"manage.py\" script.\n\n Args:\n c - Command line context\n cmd - django command to run\n \"\"\"\n\n c.run('cd {path} && python3 manage.py {cmd}'.format(\n path=managePyDir(),\n cmd=cmd\n ), pty=pty)\n\n@task(help={'length': 'Length of secret key (default=50)'})\ndef key(c, length=50, force=False):\n \"\"\"\n Generates a SECRET_KEY file which InvenTree uses for generating security hashes\n \"\"\"\n\n SECRET_KEY_FILE = os.path.join(localDir(), 'InvenTree', 'secret_key.txt')\n\n # If a SECRET_KEY file does not exist, generate a new one!\n if force or not os.path.exists(SECRET_KEY_FILE):\n print(\"Generating SECRET_KEY file - \" + SECRET_KEY_FILE)\n with open(SECRET_KEY_FILE, 'w') as key_file:\n options = string.digits + string.ascii_letters + string.punctuation\n\n key = ''.join([random.choice(options) for i in range(length)])\n\n key_file.write(key)\n\n else:\n print(\"SECRET_KEY file already exists - skipping\")\n\n\n@task(post=[key])\ndef install(c):\n \"\"\"\n Installs required python packages, and runs initial setup functions.\n \"\"\"\n\n # Install required Python packages with PIP\n c.run('pip3 install -U -r requirements.txt')\n\n # If a config.yaml file does not exist, copy from the template!\n CONFIG_FILE = os.path.join(localDir(), 'InvenTree', 'config.yaml')\n CONFIG_TEMPLATE_FILE = os.path.join(localDir(), 'InvenTree', 'config_template.yaml')\n\n if not os.path.exists(CONFIG_FILE):\n print(\"Config file 'config.yaml' does not exist - copying from template.\")\n copyfile(CONFIG_TEMPLATE_FILE, CONFIG_FILE)\n\n\n@task\ndef shell(c):\n \"\"\"\n Open a python shell with access to the InvenTree database models.\n \"\"\"\n\n manage(c, 'shell', pty=True)\n\n\n@task\ndef superuser(c):\n \"\"\"\n Create a superuser (admin) account for the database.\n \"\"\"\n\n manage(c, 'createsuperuser', pty=True)\n\n@task\ndef check(c):\n \"\"\"\n Check validity of django codebase\n \"\"\"\n\n manage(c, \"check\")\n\n@task\ndef migrate(c):\n \"\"\"\n Performs database migrations.\n This is a critical step if the database schema have been altered!\n \"\"\"\n\n print(\"Running InvenTree database migrations...\")\n print(\"========================================\")\n\n manage(c, \"makemigrations\")\n manage(c, \"migrate\")\n manage(c, \"migrate --run-syncdb\")\n manage(c, \"check\")\n\n print(\"========================================\")\n print(\"InvenTree database migrations completed!\")\n\n\n@task\ndef static(c):\n \"\"\"\n Copies required static files to the STATIC_ROOT directory,\n as per Django requirements.\n \"\"\"\n\n manage(c, \"collectstatic\")\n\n\n@task(pre=[install, migrate, static])\ndef update(c):\n \"\"\"\n Update InvenTree installation.\n\n This command should be invoked after source code has been updated,\n e.g. downloading new code from GitHub.\n\n The following tasks are performed, in order:\n\n - install\n - migrate\n - static\n \"\"\"\n pass\n\n@task\ndef translate(c):\n \"\"\"\n Regenerate translation files.\n\n Run this command after added new translatable strings,\n or after adding translations for existing strings.\n \"\"\"\n\n # Translate applicable .py / .html / .js files\n manage(c, \"makemessages -e py -e html -e js\")\n manage(c, \"compilemessages\")\n\n path = os.path.join('InvenTree', 'script', 'translation_stats.py')\n\n c.run(f'python {path}')\n\n@task\ndef style(c):\n \"\"\"\n Run PEP style checks against InvenTree sourcecode\n \"\"\"\n\n print(\"Running PEP style checks...\")\n c.run('flake8 InvenTree')\n\n@task\ndef test(c, database=None):\n \"\"\"\n Run unit-tests for InvenTree codebase.\n \"\"\"\n # Run sanity check on the django install\n manage(c, 'check')\n\n # Run coverage tests\n manage(c, 'test', pty=True)\n\n@task\ndef coverage(c):\n \"\"\"\n Run code-coverage of the InvenTree codebase,\n using the 'coverage' code-analysis tools.\n\n Generates a code coverage report (available in the htmlcov directory)\n \"\"\"\n\n # Run sanity check on the django install\n manage(c, 'check')\n\n # Run coverage tests\n c.run('coverage run {manage} test {apps}'.format(\n manage=managePyPath(),\n apps=' '.join(apps())\n ))\n\n # Generate coverage report\n c.run('coverage html')\n\n@task\ndef mysql(c):\n \"\"\"\n Install packages required for using InvenTree with a MySQL database.\n \"\"\"\n \n print('Installing packages required for MySQL')\n\n c.run('sudo apt-get install mysql-server libmysqlclient-dev')\n c.run('pip3 install mysqlclient')\n\n@task\ndef postgresql(c):\n \"\"\"\n Install packages required for using InvenTree with a PostgreSQL database\n \"\"\"\n\n print(\"Installing packages required for PostgreSQL\")\n\n c.run('sudo apt-get install postgresql postgresql-contrib libpq-dev')\n c.run('pip3 install psycopg2')\n\n@task(help={'filename': \"Output filename (default = 'data.json')\"})\ndef export_records(c, filename='data.json'):\n \"\"\"\n Export all database records to a file\n \"\"\"\n\n # Get an absolute path to the file\n if not os.path.isabs(filename):\n filename = os.path.join(localDir(), filename)\n filename = os.path.abspath(filename) \n\n print(f\"Exporting database records to file '{filename}'\")\n\n if os.path.exists(filename):\n response = input(\"Warning: file already exists. Do you want to overwrite? [y/N]: \")\n response = str(response).strip().lower()\n\n if response not in ['y', 'yes']:\n print(\"Cancelled export operation\")\n sys.exit(1)\n\n cmd = f'dumpdata --exclude contenttypes --exclude auth.permission --indent 2 --output {filename}'\n\n manage(c, cmd, pty=True)\n\n@task(help={'filename': 'Input filename'})\ndef import_records(c, filename='data.json'):\n \"\"\"\n Import database records from a file\n \"\"\"\n\n # Get an absolute path to the supplied filename\n if not os.path.isabs(filename):\n filename = os.path.join(localDir(), filename)\n\n if not os.path.exists(filename):\n print(f\"Error: File '{filename}' does not exist\")\n sys.exit(1)\n\n print(f\"Importing database records from '{filename}'\")\n\n cmd = f'loaddata {filename}'\n\n manage(c, cmd, pty=True)\n\n@task\ndef import_fixtures(c):\n \"\"\"\n Import fixture data into the database.\n\n This command imports all existing test fixture data into the database.\n\n Warning:\n - Intended for testing / development only!\n - Running this command may overwrite existing database data!!\n - Don't say you were not warned...\n \"\"\"\n\n fixtures = [\n # Build model\n 'build',\n \n # Common models\n 'settings',\n\n # Company model\n 'company',\n 'price_breaks',\n 'supplier_part',\n\n # Order model\n 'order',\n\n # Part model\n 'bom',\n 'category',\n 'params',\n 'part',\n 'test_templates',\n\n # Stock model\n 'location',\n 'stock_tests',\n 'stock',\n ]\n\n command = 'loaddata ' + ' '.join(fixtures)\n\n manage(c, command, pty=True)\n\n@task\ndef backup(c):\n \"\"\"\n Create a backup of database models and uploaded media files.\n\n Backup files will be written to the 'backup_dir' file specified in 'config.yaml'\n\n \"\"\"\n\n manage(c, 'dbbackup')\n manage(c, 'mediabackup')\n\n@task(help={'address': 'Server address:port (default=127.0.0.1:8000)'})\ndef server(c, address=\"127.0.0.1:8000\"):\n \"\"\"\n Launch a (deveopment) server using Django's in-built webserver.\n\n Note: This is *not* sufficient for a production installation.\n \"\"\"\n\n manage(c, \"runserver {address}\".format(address=address), pty=True)\n", "path": "tasks.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom invoke import task\nfrom shutil import copyfile\n\nimport random\nimport string\nimport os\nimport sys\n\ndef apps():\n \"\"\"\n Returns a list of installed apps\n \"\"\"\n\n return [\n 'barcode',\n 'build',\n 'common',\n 'company',\n 'label',\n 'order',\n 'part',\n 'report',\n 'stock',\n 'InvenTree',\n 'users',\n ]\n\ndef localDir():\n \"\"\"\n Returns the directory of *THIS* file.\n Used to ensure that the various scripts always run\n in the correct directory.\n \"\"\"\n return os.path.dirname(os.path.abspath(__file__))\n\ndef managePyDir():\n \"\"\"\n Returns the directory of the manage.py file\n \"\"\"\n\n return os.path.join(localDir(), 'InvenTree')\n\ndef managePyPath():\n \"\"\"\n Return the path of the manage.py file\n \"\"\"\n\n return os.path.join(managePyDir(), 'manage.py')\n\ndef manage(c, cmd, pty=False):\n \"\"\"\n Runs a given command against django's \"manage.py\" script.\n\n Args:\n c - Command line context\n cmd - django command to run\n \"\"\"\n\n c.run('cd {path} && python3 manage.py {cmd}'.format(\n path=managePyDir(),\n cmd=cmd\n ), pty=pty)\n\n@task(help={'length': 'Length of secret key (default=50)'})\ndef key(c, length=50, force=False):\n \"\"\"\n Generates a SECRET_KEY file which InvenTree uses for generating security hashes\n \"\"\"\n\n SECRET_KEY_FILE = os.path.join(localDir(), 'InvenTree', 'secret_key.txt')\n\n # If a SECRET_KEY file does not exist, generate a new one!\n if force or not os.path.exists(SECRET_KEY_FILE):\n print(\"Generating SECRET_KEY file - \" + SECRET_KEY_FILE)\n with open(SECRET_KEY_FILE, 'w') as key_file:\n options = string.digits + string.ascii_letters + string.punctuation\n\n key = ''.join([random.choice(options) for i in range(length)])\n\n key_file.write(key)\n\n else:\n print(\"SECRET_KEY file already exists - skipping\")\n\n\n@task(post=[key])\ndef install(c):\n \"\"\"\n Installs required python packages, and runs initial setup functions.\n \"\"\"\n\n # Install required Python packages with PIP\n c.run('pip3 install -U -r requirements.txt')\n\n # If a config.yaml file does not exist, copy from the template!\n CONFIG_FILE = os.path.join(localDir(), 'InvenTree', 'config.yaml')\n CONFIG_TEMPLATE_FILE = os.path.join(localDir(), 'InvenTree', 'config_template.yaml')\n\n if not os.path.exists(CONFIG_FILE):\n print(\"Config file 'config.yaml' does not exist - copying from template.\")\n copyfile(CONFIG_TEMPLATE_FILE, CONFIG_FILE)\n\n\n@task\ndef shell(c):\n \"\"\"\n Open a python shell with access to the InvenTree database models.\n \"\"\"\n\n manage(c, 'shell', pty=True)\n\n\n@task\ndef superuser(c):\n \"\"\"\n Create a superuser (admin) account for the database.\n \"\"\"\n\n manage(c, 'createsuperuser', pty=True)\n\n@task\ndef check(c):\n \"\"\"\n Check validity of django codebase\n \"\"\"\n\n manage(c, \"check\")\n\n@task\ndef migrate(c):\n \"\"\"\n Performs database migrations.\n This is a critical step if the database schema have been altered!\n \"\"\"\n\n print(\"Running InvenTree database migrations...\")\n print(\"========================================\")\n\n manage(c, \"makemigrations\")\n manage(c, \"migrate\")\n manage(c, \"migrate --run-syncdb\")\n manage(c, \"check\")\n\n print(\"========================================\")\n print(\"InvenTree database migrations completed!\")\n\n\n@task\ndef static(c):\n \"\"\"\n Copies required static files to the STATIC_ROOT directory,\n as per Django requirements.\n \"\"\"\n\n manage(c, \"collectstatic\")\n\n\n@task(pre=[install, migrate, static])\ndef update(c):\n \"\"\"\n Update InvenTree installation.\n\n This command should be invoked after source code has been updated,\n e.g. downloading new code from GitHub.\n\n The following tasks are performed, in order:\n\n - install\n - migrate\n - static\n \"\"\"\n pass\n\n@task\ndef translate(c):\n \"\"\"\n Regenerate translation files.\n\n Run this command after added new translatable strings,\n or after adding translations for existing strings.\n \"\"\"\n\n # Translate applicable .py / .html / .js files\n manage(c, \"makemessages -e py -e html -e js\")\n manage(c, \"compilemessages\")\n\n path = os.path.join('InvenTree', 'script', 'translation_stats.py')\n\n c.run(f'python {path}')\n\n@task\ndef style(c):\n \"\"\"\n Run PEP style checks against InvenTree sourcecode\n \"\"\"\n\n print(\"Running PEP style checks...\")\n c.run('flake8 InvenTree')\n\n@task\ndef test(c, database=None):\n \"\"\"\n Run unit-tests for InvenTree codebase.\n \"\"\"\n # Run sanity check on the django install\n manage(c, 'check')\n\n # Run coverage tests\n manage(c, 'test', pty=True)\n\n@task\ndef coverage(c):\n \"\"\"\n Run code-coverage of the InvenTree codebase,\n using the 'coverage' code-analysis tools.\n\n Generates a code coverage report (available in the htmlcov directory)\n \"\"\"\n\n # Run sanity check on the django install\n manage(c, 'check')\n\n # Run coverage tests\n c.run('coverage run {manage} test {apps}'.format(\n manage=managePyPath(),\n apps=' '.join(apps())\n ))\n\n # Generate coverage report\n c.run('coverage html')\n\n@task\ndef mysql(c):\n \"\"\"\n Install packages required for using InvenTree with a MySQL database.\n \"\"\"\n \n print('Installing packages required for MySQL')\n\n c.run('sudo apt-get install mysql-server libmysqlclient-dev')\n c.run('pip3 install mysqlclient')\n\n@task\ndef postgresql(c):\n \"\"\"\n Install packages required for using InvenTree with a PostgreSQL database\n \"\"\"\n\n print(\"Installing packages required for PostgreSQL\")\n\n c.run('sudo apt-get install postgresql postgresql-contrib libpq-dev')\n c.run('pip3 install psycopg2')\n\n@task(help={'filename': \"Output filename (default = 'data.json')\"})\ndef export_records(c, filename='data.json'):\n \"\"\"\n Export all database records to a file\n \"\"\"\n\n # Get an absolute path to the file\n if not os.path.isabs(filename):\n filename = os.path.join(localDir(), filename)\n filename = os.path.abspath(filename) \n\n print(f\"Exporting database records to file '{filename}'\")\n\n if os.path.exists(filename):\n response = input(\"Warning: file already exists. Do you want to overwrite? [y/N]: \")\n response = str(response).strip().lower()\n\n if response not in ['y', 'yes']:\n print(\"Cancelled export operation\")\n sys.exit(1)\n\n cmd = f'dumpdata --exclude contenttypes --exclude auth.permission --indent 2 --output {filename}'\n\n manage(c, cmd, pty=True)\n\n@task(help={'filename': 'Input filename'})\ndef import_records(c, filename='data.json'):\n \"\"\"\n Import database records from a file\n \"\"\"\n\n # Get an absolute path to the supplied filename\n if not os.path.isabs(filename):\n filename = os.path.join(localDir(), filename)\n\n if not os.path.exists(filename):\n print(f\"Error: File '{filename}' does not exist\")\n sys.exit(1)\n\n print(f\"Importing database records from '{filename}'\")\n\n cmd = f'loaddata {filename}'\n\n manage(c, cmd, pty=True)\n\n@task\ndef import_fixtures(c):\n \"\"\"\n Import fixture data into the database.\n\n This command imports all existing test fixture data into the database.\n\n Warning:\n - Intended for testing / development only!\n - Running this command may overwrite existing database data!!\n - Don't say you were not warned...\n \"\"\"\n\n fixtures = [\n # Build model\n 'build',\n \n # Common models\n 'settings',\n\n # Company model\n 'company',\n 'price_breaks',\n 'supplier_part',\n\n # Order model\n 'order',\n\n # Part model\n 'bom',\n 'category',\n 'params',\n 'part',\n 'test_templates',\n\n # Stock model\n 'location',\n 'stock_tests',\n 'stock',\n ]\n\n command = 'loaddata ' + ' '.join(fixtures)\n\n manage(c, command, pty=True)\n\n@task\ndef backup(c):\n \"\"\"\n Create a backup of database models and uploaded media files.\n\n Backup files will be written to the 'backup_dir' file specified in 'config.yaml'\n \"\"\"\n\n manage(c, 'dbbackup')\n manage(c, 'mediabackup')\n\n@task\ndef restore(c):\n \"\"\"\n Restores database models and media files.\n\n Backup files are read from the 'backup_dir' file specified in 'config.yaml'\n \"\"\"\n\n manage(c, 'dbrestore')\n manage(c, 'mediarestore')\n\n@task(help={'address': 'Server address:port (default=127.0.0.1:8000)'})\ndef server(c, address=\"127.0.0.1:8000\"):\n \"\"\"\n Launch a (deveopment) server using Django's in-built webserver.\n\n Note: This is *not* sufficient for a production installation.\n \"\"\"\n\n manage(c, \"runserver {address}\".format(address=address), pty=True)\n", "path": "tasks.py"}]}
3,448
210
gh_patches_debug_41148
rasdani/github-patches
git_diff
rasterio__rasterio-915
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- In-memory GDAL datasets, sqlite style GDAL has a concept of in-memory datasets that you create by passing in a filename like `/vsimem/example.tif`. There's a per-process in-memory filesystem at `/vsimem` and a TIFF file in it at `/vsimem/example.tif`. For Rasterio's purposes it could be better to follow the example of sqlite and simply pass, say, `:memory:`. Rasterio could generate a UUID and an appropriate VSIMEM filename to pass to GDAL. I'm thinking that this could be used to simply the existing code in rio-mbtiles ``` python with rasterio.open('/vsimem/tileimg', 'w', **kwds) as tmp: reproject(rasterio.band(src, src.indexes), rasterio.band(tmp, tmp.indexes)) data = bytearray(virtual_file_to_buffer('/vsimem/tileimg')) # Workaround for https://bugs.python.org/issue23349. if sys.version_info[0] == 2 and sys.version_info[2] < 10: data[:] = data[-1:] + data[:-1] return tile, data ``` down to ``` python with rasterio.open(':memory:', 'w', **kwds) as tmp: reproject(rasterio.band(src, src.indexes), rasterio.band(tmp, tmp.indexes)) data = tmp.to_bytes() return tile, data ``` Benefits: - More appropriate abstraction for Rasterio, where `open()` is supposed to be the sole access to datasets. - Prevents mixups on the vsimem filesystem that could be hard to recover from and debug. - We can workaround the Python bug for all users within `to_bytes()`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `rasterio/io.py` Content: ``` 1 """Classes capable of reading and writing datasets 2 3 Instances of these classes are called dataset objects. 4 """ 5 6 import math 7 import warnings 8 9 from rasterio._base import ( 10 get_dataset_driver, driver_can_create, driver_can_create_copy) 11 from rasterio._io import ( 12 DatasetReaderBase, DatasetWriterBase, BufferedDatasetWriterBase) 13 from rasterio import enums, windows 14 from rasterio.transform import guard_transform, xy, rowcol 15 16 17 class TransformMethodsMixin(object): 18 """Mixin providing methods for calculations related 19 to transforming between rows and columns of the raster 20 array and the coordinates. 21 22 These methods are wrappers for the functionality in 23 `rasterio.transform` module. 24 25 A subclass with this mixin MUST provide a `transform` 26 property. 27 """ 28 29 def xy(self, row, col, offset="center"): 30 """Returns the coordinates ``(x, y)`` of a pixel at `row` and `col`. 31 The pixel's center is returned by default, but a corner can be returned 32 by setting `offset` to one of `ul, ur, ll, lr`. 33 34 Parameters 35 ---------- 36 row : int 37 Pixel row. 38 col : int 39 Pixel column. 40 offset : str, optional 41 Determines if the returned coordinates are for the center of the 42 pixel or for a corner. 43 44 Returns 45 ------- 46 tuple 47 ``(x, y)`` 48 """ 49 return xy(self.transform, row, col, offset=offset) 50 51 def ul(self, row, col): 52 """Returns the coordinates (x, y) of the upper left corner of a 53 pixel at `row` and `col` in the units of the dataset's 54 coordinate reference system. 55 56 Deprecated; Use `xy(row, col, offset='ul')` instead. 57 """ 58 warnings.warn("ul method is deprecated. Use xy(row, col, offset='ul')", 59 DeprecationWarning) 60 return xy(self.transform, row, col, offset='ul') 61 62 def index(self, x, y, op=math.floor, precision=6): 63 """ 64 Returns the (row, col) index of the pixel containing (x, y) given a 65 coordinate reference system. 66 67 Use an epsilon, magnitude determined by the precision parameter 68 and sign determined by the op function: 69 positive for floor, negative for ceil. 70 71 Parameters 72 ---------- 73 x : float 74 x value in coordinate reference system 75 y : float 76 y value in coordinate reference system 77 op : function, optional (default: math.floor) 78 Function to convert fractional pixels to whole numbers (floor, 79 ceiling, round) 80 precision : int, optional (default: 6) 81 Decimal places of precision in indexing, as in `round()`. 82 83 Returns 84 ------- 85 tuple 86 (row index, col index) 87 """ 88 return rowcol(self.transform, x, y, op=op, precision=precision) 89 90 91 class WindowMethodsMixin(object): 92 """Mixin providing methods for window-related calculations. 93 These methods are wrappers for the functionality in 94 `rasterio.windows` module. 95 96 A subclass with this mixin MUST provide the following 97 properties: `transform`, `height` and `width` 98 """ 99 100 def window(self, left, bottom, right, top, boundless=False): 101 """Get the window corresponding to the bounding coordinates. 102 103 Parameters 104 ---------- 105 left : float 106 Left (west) bounding coordinate 107 bottom : float 108 Bottom (south) bounding coordinate 109 right : float 110 Right (east) bounding coordinate 111 top : float 112 Top (north) bounding coordinate 113 boundless: boolean, optional 114 If boundless is False, window is limited 115 to extent of this dataset. 116 117 Returns 118 ------- 119 window: tuple 120 ((row_start, row_stop), (col_start, col_stop)) 121 corresponding to the bounding coordinates 122 123 """ 124 125 transform = guard_transform(self.transform) 126 return windows.from_bounds( 127 left, bottom, right, top, transform=transform, 128 height=self.height, width=self.width, boundless=boundless) 129 130 def window_transform(self, window): 131 """Get the affine transform for a dataset window. 132 133 Parameters 134 ---------- 135 window: tuple 136 Dataset window tuple 137 138 Returns 139 ------- 140 transform: Affine 141 The affine transform matrix for the given window 142 """ 143 144 transform = guard_transform(self.transform) 145 return windows.transform(window, transform) 146 147 def window_bounds(self, window): 148 """Get the bounds of a window 149 150 Parameters 151 ---------- 152 window: tuple 153 Dataset window tuple 154 155 Returns 156 ------- 157 bounds : tuple 158 x_min, y_min, x_max, y_max for the given window 159 """ 160 161 transform = guard_transform(self.transform) 162 return windows.bounds(window, transform) 163 164 165 class DatasetReader(DatasetReaderBase, WindowMethodsMixin, 166 TransformMethodsMixin): 167 """An unbuffered data and metadata reader""" 168 169 def __repr__(self): 170 return "<{} DatasetReader name='{}' mode='{}'>".format( 171 self.closed and 'closed' or 'open', self.name, self.mode) 172 173 class DatasetWriter(DatasetWriterBase, WindowMethodsMixin, 174 TransformMethodsMixin): 175 """An unbuffered data and metadata writer. Its methods write data 176 directly to disk. 177 """ 178 179 def __repr__(self): 180 return "<{} DatasetWriter name='{}' mode='{}'>".format( 181 self.closed and 'closed' or 'open', self.name, self.mode) 182 183 184 class BufferedDatasetWriter(BufferedDatasetWriterBase, WindowMethodsMixin, 185 TransformMethodsMixin): 186 """Maintains data and metadata in a buffer, writing to disk or 187 network only when `close()` is called. 188 189 This allows incremental updates to datasets using formats that don't 190 otherwise support updates, such as JPEG. 191 """ 192 193 def __repr__(self): 194 return "<{} BufferedDatasetWriter name='{}' mode='{}'>".format( 195 self.closed and 'closed' or 'open', self.name, self.mode) 196 197 198 def get_writer_for_driver(driver): 199 """Return the writer class appropriate for the specified driver.""" 200 cls = None 201 if driver_can_create(driver): 202 cls = DatasetWriter 203 elif driver_can_create_copy(driver): # pragma: no branch 204 cls = BufferedDatasetWriter 205 return cls 206 207 208 def get_writer_for_path(path): 209 """Return the writer class appropriate for the existing dataset.""" 210 driver = get_dataset_driver(path) 211 return get_writer_for_driver(driver) 212 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/rasterio/io.py b/rasterio/io.py --- a/rasterio/io.py +++ b/rasterio/io.py @@ -3,17 +3,23 @@ Instances of these classes are called dataset objects. """ +import logging import math +import uuid import warnings from rasterio._base import ( get_dataset_driver, driver_can_create, driver_can_create_copy) from rasterio._io import ( - DatasetReaderBase, DatasetWriterBase, BufferedDatasetWriterBase) + DatasetReaderBase, DatasetWriterBase, BufferedDatasetWriterBase, + MemoryFileBase) from rasterio import enums, windows from rasterio.transform import guard_transform, xy, rowcol +log = logging.getLogger(__name__) + + class TransformMethodsMixin(object): """Mixin providing methods for calculations related to transforming between rows and columns of the raster @@ -170,6 +176,7 @@ return "<{} DatasetReader name='{}' mode='{}'>".format( self.closed and 'closed' or 'open', self.name, self.mode) + class DatasetWriter(DatasetWriterBase, WindowMethodsMixin, TransformMethodsMixin): """An unbuffered data and metadata writer. Its methods write data @@ -195,6 +202,68 @@ self.closed and 'closed' or 'open', self.name, self.mode) +class MemoryFile(MemoryFileBase): + """A BytesIO-like object, backed by an in-memory file. + + This allows formatted files to be read and written without I/O. + + A MemoryFile created with initial bytes becomes immutable. A + MemoryFile created without initial bytes may be written to using + either file-like or dataset interfaces. + + Examples + -------- + + A GeoTIFF can be loaded in memory and accessed using the GeoTIFF + format driver + + >>> with open('tests/data/RGB.byte.tif', 'rb') as f, \ + ... MemoryFile(f.read()) as memfile: + ... with memfile.open() as src: + ... pprint.pprint(src.profile) + ... + {'count': 3, + 'crs': CRS({'init': 'epsg:32618'}), + 'driver': 'GTiff', + 'dtype': 'uint8', + 'height': 718, + 'interleave': 'pixel', + 'nodata': 0.0, + 'tiled': False, + 'transform': Affine(300.0379266750948, 0.0, 101985.0, + 0.0, -300.041782729805, 2826915.0), + 'width': 791} + + """ + + def open(self, driver=None, width=None, height=None, + count=None, crs=None, transform=None, dtype=None, nodata=None, + **kwargs): + """Open the file and return a Rasterio dataset object. + + If data has already been written, the file is opened in 'r+' + mode. Otherwise, the file is opened in 'w' mode. + """ + if self.closed: + raise IOError("I/O operation on closed file.") + if self.exists(): + s = get_writer_for_path(self.name)(self.name, 'r+') + else: + s = get_writer_for_driver(driver)(self.name, 'w', driver=driver, + width=width, height=height, + count=count, crs=crs, + transform=transform, dtype=dtype, + nodata=nodata, **kwargs) + s.start() + return s + + def __enter__(self): + return self + + def __exit__(self, *args, **kwargs): + self.close() + + def get_writer_for_driver(driver): """Return the writer class appropriate for the specified driver.""" cls = None
{"golden_diff": "diff --git a/rasterio/io.py b/rasterio/io.py\n--- a/rasterio/io.py\n+++ b/rasterio/io.py\n@@ -3,17 +3,23 @@\n Instances of these classes are called dataset objects.\n \"\"\"\n \n+import logging\n import math\n+import uuid\n import warnings\n \n from rasterio._base import (\n get_dataset_driver, driver_can_create, driver_can_create_copy)\n from rasterio._io import (\n- DatasetReaderBase, DatasetWriterBase, BufferedDatasetWriterBase)\n+ DatasetReaderBase, DatasetWriterBase, BufferedDatasetWriterBase,\n+ MemoryFileBase)\n from rasterio import enums, windows\n from rasterio.transform import guard_transform, xy, rowcol\n \n \n+log = logging.getLogger(__name__)\n+\n+\n class TransformMethodsMixin(object):\n \"\"\"Mixin providing methods for calculations related\n to transforming between rows and columns of the raster\n@@ -170,6 +176,7 @@\n return \"<{} DatasetReader name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n \n+\n class DatasetWriter(DatasetWriterBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"An unbuffered data and metadata writer. Its methods write data\n@@ -195,6 +202,68 @@\n self.closed and 'closed' or 'open', self.name, self.mode)\n \n \n+class MemoryFile(MemoryFileBase):\n+ \"\"\"A BytesIO-like object, backed by an in-memory file.\n+\n+ This allows formatted files to be read and written without I/O.\n+\n+ A MemoryFile created with initial bytes becomes immutable. A\n+ MemoryFile created without initial bytes may be written to using\n+ either file-like or dataset interfaces.\n+\n+ Examples\n+ --------\n+\n+ A GeoTIFF can be loaded in memory and accessed using the GeoTIFF\n+ format driver\n+\n+ >>> with open('tests/data/RGB.byte.tif', 'rb') as f, \\\n+ ... MemoryFile(f.read()) as memfile:\n+ ... with memfile.open() as src:\n+ ... pprint.pprint(src.profile)\n+ ...\n+ {'count': 3,\n+ 'crs': CRS({'init': 'epsg:32618'}),\n+ 'driver': 'GTiff',\n+ 'dtype': 'uint8',\n+ 'height': 718,\n+ 'interleave': 'pixel',\n+ 'nodata': 0.0,\n+ 'tiled': False,\n+ 'transform': Affine(300.0379266750948, 0.0, 101985.0,\n+ 0.0, -300.041782729805, 2826915.0),\n+ 'width': 791}\n+\n+ \"\"\"\n+\n+ def open(self, driver=None, width=None, height=None,\n+ count=None, crs=None, transform=None, dtype=None, nodata=None,\n+ **kwargs):\n+ \"\"\"Open the file and return a Rasterio dataset object.\n+\n+ If data has already been written, the file is opened in 'r+'\n+ mode. Otherwise, the file is opened in 'w' mode.\n+ \"\"\"\n+ if self.closed:\n+ raise IOError(\"I/O operation on closed file.\")\n+ if self.exists():\n+ s = get_writer_for_path(self.name)(self.name, 'r+')\n+ else:\n+ s = get_writer_for_driver(driver)(self.name, 'w', driver=driver,\n+ width=width, height=height,\n+ count=count, crs=crs,\n+ transform=transform, dtype=dtype,\n+ nodata=nodata, **kwargs)\n+ s.start()\n+ return s\n+\n+ def __enter__(self):\n+ return self\n+\n+ def __exit__(self, *args, **kwargs):\n+ self.close()\n+\n+\n def get_writer_for_driver(driver):\n \"\"\"Return the writer class appropriate for the specified driver.\"\"\"\n cls = None\n", "issue": "In-memory GDAL datasets, sqlite style\nGDAL has a concept of in-memory datasets that you create by passing in a filename like `/vsimem/example.tif`. There's a per-process in-memory filesystem at `/vsimem` and a TIFF file in it at `/vsimem/example.tif`. For Rasterio's purposes it could be better to follow the example of sqlite and simply pass, say, `:memory:`. Rasterio could generate a UUID and an appropriate VSIMEM filename to pass to GDAL.\n\nI'm thinking that this could be used to simply the existing code in rio-mbtiles\n\n``` python\n with rasterio.open('/vsimem/tileimg', 'w', **kwds) as tmp:\n reproject(rasterio.band(src, src.indexes),\n rasterio.band(tmp, tmp.indexes))\n\n data = bytearray(virtual_file_to_buffer('/vsimem/tileimg'))\n\n # Workaround for https://bugs.python.org/issue23349.\n if sys.version_info[0] == 2 and sys.version_info[2] < 10:\n data[:] = data[-1:] + data[:-1]\n\n return tile, data\n```\n\ndown to \n\n``` python\n with rasterio.open(':memory:', 'w', **kwds) as tmp:\n reproject(rasterio.band(src, src.indexes),\n rasterio.band(tmp, tmp.indexes))\n data = tmp.to_bytes()\n\n return tile, data\n```\n\nBenefits:\n- More appropriate abstraction for Rasterio, where `open()` is supposed to be the sole access to datasets.\n- Prevents mixups on the vsimem filesystem that could be hard to recover from and debug.\n- We can workaround the Python bug for all users within `to_bytes()`.\n\n", "before_files": [{"content": "\"\"\"Classes capable of reading and writing datasets\n\nInstances of these classes are called dataset objects.\n\"\"\"\n\nimport math\nimport warnings\n\nfrom rasterio._base import (\n get_dataset_driver, driver_can_create, driver_can_create_copy)\nfrom rasterio._io import (\n DatasetReaderBase, DatasetWriterBase, BufferedDatasetWriterBase)\nfrom rasterio import enums, windows\nfrom rasterio.transform import guard_transform, xy, rowcol\n\n\nclass TransformMethodsMixin(object):\n \"\"\"Mixin providing methods for calculations related\n to transforming between rows and columns of the raster\n array and the coordinates.\n\n These methods are wrappers for the functionality in\n `rasterio.transform` module.\n\n A subclass with this mixin MUST provide a `transform`\n property.\n \"\"\"\n\n def xy(self, row, col, offset=\"center\"):\n \"\"\"Returns the coordinates ``(x, y)`` of a pixel at `row` and `col`.\n The pixel's center is returned by default, but a corner can be returned\n by setting `offset` to one of `ul, ur, ll, lr`.\n\n Parameters\n ----------\n row : int\n Pixel row.\n col : int\n Pixel column.\n offset : str, optional\n Determines if the returned coordinates are for the center of the\n pixel or for a corner.\n\n Returns\n -------\n tuple\n ``(x, y)``\n \"\"\"\n return xy(self.transform, row, col, offset=offset)\n\n def ul(self, row, col):\n \"\"\"Returns the coordinates (x, y) of the upper left corner of a\n pixel at `row` and `col` in the units of the dataset's\n coordinate reference system.\n\n Deprecated; Use `xy(row, col, offset='ul')` instead.\n \"\"\"\n warnings.warn(\"ul method is deprecated. Use xy(row, col, offset='ul')\",\n DeprecationWarning)\n return xy(self.transform, row, col, offset='ul')\n\n def index(self, x, y, op=math.floor, precision=6):\n \"\"\"\n Returns the (row, col) index of the pixel containing (x, y) given a\n coordinate reference system.\n\n Use an epsilon, magnitude determined by the precision parameter\n and sign determined by the op function:\n positive for floor, negative for ceil.\n\n Parameters\n ----------\n x : float\n x value in coordinate reference system\n y : float\n y value in coordinate reference system\n op : function, optional (default: math.floor)\n Function to convert fractional pixels to whole numbers (floor,\n ceiling, round)\n precision : int, optional (default: 6)\n Decimal places of precision in indexing, as in `round()`.\n\n Returns\n -------\n tuple\n (row index, col index)\n \"\"\"\n return rowcol(self.transform, x, y, op=op, precision=precision)\n\n\nclass WindowMethodsMixin(object):\n \"\"\"Mixin providing methods for window-related calculations.\n These methods are wrappers for the functionality in\n `rasterio.windows` module.\n\n A subclass with this mixin MUST provide the following\n properties: `transform`, `height` and `width`\n \"\"\"\n\n def window(self, left, bottom, right, top, boundless=False):\n \"\"\"Get the window corresponding to the bounding coordinates.\n\n Parameters\n ----------\n left : float\n Left (west) bounding coordinate\n bottom : float\n Bottom (south) bounding coordinate\n right : float\n Right (east) bounding coordinate\n top : float\n Top (north) bounding coordinate\n boundless: boolean, optional\n If boundless is False, window is limited\n to extent of this dataset.\n\n Returns\n -------\n window: tuple\n ((row_start, row_stop), (col_start, col_stop))\n corresponding to the bounding coordinates\n\n \"\"\"\n\n transform = guard_transform(self.transform)\n return windows.from_bounds(\n left, bottom, right, top, transform=transform,\n height=self.height, width=self.width, boundless=boundless)\n\n def window_transform(self, window):\n \"\"\"Get the affine transform for a dataset window.\n\n Parameters\n ----------\n window: tuple\n Dataset window tuple\n\n Returns\n -------\n transform: Affine\n The affine transform matrix for the given window\n \"\"\"\n\n transform = guard_transform(self.transform)\n return windows.transform(window, transform)\n\n def window_bounds(self, window):\n \"\"\"Get the bounds of a window\n\n Parameters\n ----------\n window: tuple\n Dataset window tuple\n\n Returns\n -------\n bounds : tuple\n x_min, y_min, x_max, y_max for the given window\n \"\"\"\n\n transform = guard_transform(self.transform)\n return windows.bounds(window, transform)\n\n\nclass DatasetReader(DatasetReaderBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"An unbuffered data and metadata reader\"\"\"\n\n def __repr__(self):\n return \"<{} DatasetReader name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\nclass DatasetWriter(DatasetWriterBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"An unbuffered data and metadata writer. Its methods write data\n directly to disk.\n \"\"\"\n\n def __repr__(self):\n return \"<{} DatasetWriter name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\nclass BufferedDatasetWriter(BufferedDatasetWriterBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"Maintains data and metadata in a buffer, writing to disk or\n network only when `close()` is called.\n\n This allows incremental updates to datasets using formats that don't\n otherwise support updates, such as JPEG.\n \"\"\"\n\n def __repr__(self):\n return \"<{} BufferedDatasetWriter name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\ndef get_writer_for_driver(driver):\n \"\"\"Return the writer class appropriate for the specified driver.\"\"\"\n cls = None\n if driver_can_create(driver):\n cls = DatasetWriter\n elif driver_can_create_copy(driver): # pragma: no branch\n cls = BufferedDatasetWriter\n return cls\n\n\ndef get_writer_for_path(path):\n \"\"\"Return the writer class appropriate for the existing dataset.\"\"\"\n driver = get_dataset_driver(path)\n return get_writer_for_driver(driver)\n", "path": "rasterio/io.py"}], "after_files": [{"content": "\"\"\"Classes capable of reading and writing datasets\n\nInstances of these classes are called dataset objects.\n\"\"\"\n\nimport logging\nimport math\nimport uuid\nimport warnings\n\nfrom rasterio._base import (\n get_dataset_driver, driver_can_create, driver_can_create_copy)\nfrom rasterio._io import (\n DatasetReaderBase, DatasetWriterBase, BufferedDatasetWriterBase,\n MemoryFileBase)\nfrom rasterio import enums, windows\nfrom rasterio.transform import guard_transform, xy, rowcol\n\n\nlog = logging.getLogger(__name__)\n\n\nclass TransformMethodsMixin(object):\n \"\"\"Mixin providing methods for calculations related\n to transforming between rows and columns of the raster\n array and the coordinates.\n\n These methods are wrappers for the functionality in\n `rasterio.transform` module.\n\n A subclass with this mixin MUST provide a `transform`\n property.\n \"\"\"\n\n def xy(self, row, col, offset=\"center\"):\n \"\"\"Returns the coordinates ``(x, y)`` of a pixel at `row` and `col`.\n The pixel's center is returned by default, but a corner can be returned\n by setting `offset` to one of `ul, ur, ll, lr`.\n\n Parameters\n ----------\n row : int\n Pixel row.\n col : int\n Pixel column.\n offset : str, optional\n Determines if the returned coordinates are for the center of the\n pixel or for a corner.\n\n Returns\n -------\n tuple\n ``(x, y)``\n \"\"\"\n return xy(self.transform, row, col, offset=offset)\n\n def ul(self, row, col):\n \"\"\"Returns the coordinates (x, y) of the upper left corner of a\n pixel at `row` and `col` in the units of the dataset's\n coordinate reference system.\n\n Deprecated; Use `xy(row, col, offset='ul')` instead.\n \"\"\"\n warnings.warn(\"ul method is deprecated. Use xy(row, col, offset='ul')\",\n DeprecationWarning)\n return xy(self.transform, row, col, offset='ul')\n\n def index(self, x, y, op=math.floor, precision=6):\n \"\"\"\n Returns the (row, col) index of the pixel containing (x, y) given a\n coordinate reference system.\n\n Use an epsilon, magnitude determined by the precision parameter\n and sign determined by the op function:\n positive for floor, negative for ceil.\n\n Parameters\n ----------\n x : float\n x value in coordinate reference system\n y : float\n y value in coordinate reference system\n op : function, optional (default: math.floor)\n Function to convert fractional pixels to whole numbers (floor,\n ceiling, round)\n precision : int, optional (default: 6)\n Decimal places of precision in indexing, as in `round()`.\n\n Returns\n -------\n tuple\n (row index, col index)\n \"\"\"\n return rowcol(self.transform, x, y, op=op, precision=precision)\n\n\nclass WindowMethodsMixin(object):\n \"\"\"Mixin providing methods for window-related calculations.\n These methods are wrappers for the functionality in\n `rasterio.windows` module.\n\n A subclass with this mixin MUST provide the following\n properties: `transform`, `height` and `width`\n \"\"\"\n\n def window(self, left, bottom, right, top, boundless=False):\n \"\"\"Get the window corresponding to the bounding coordinates.\n\n Parameters\n ----------\n left : float\n Left (west) bounding coordinate\n bottom : float\n Bottom (south) bounding coordinate\n right : float\n Right (east) bounding coordinate\n top : float\n Top (north) bounding coordinate\n boundless: boolean, optional\n If boundless is False, window is limited\n to extent of this dataset.\n\n Returns\n -------\n window: tuple\n ((row_start, row_stop), (col_start, col_stop))\n corresponding to the bounding coordinates\n\n \"\"\"\n\n transform = guard_transform(self.transform)\n return windows.from_bounds(\n left, bottom, right, top, transform=transform,\n height=self.height, width=self.width, boundless=boundless)\n\n def window_transform(self, window):\n \"\"\"Get the affine transform for a dataset window.\n\n Parameters\n ----------\n window: tuple\n Dataset window tuple\n\n Returns\n -------\n transform: Affine\n The affine transform matrix for the given window\n \"\"\"\n\n transform = guard_transform(self.transform)\n return windows.transform(window, transform)\n\n def window_bounds(self, window):\n \"\"\"Get the bounds of a window\n\n Parameters\n ----------\n window: tuple\n Dataset window tuple\n\n Returns\n -------\n bounds : tuple\n x_min, y_min, x_max, y_max for the given window\n \"\"\"\n\n transform = guard_transform(self.transform)\n return windows.bounds(window, transform)\n\n\nclass DatasetReader(DatasetReaderBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"An unbuffered data and metadata reader\"\"\"\n\n def __repr__(self):\n return \"<{} DatasetReader name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\nclass DatasetWriter(DatasetWriterBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"An unbuffered data and metadata writer. Its methods write data\n directly to disk.\n \"\"\"\n\n def __repr__(self):\n return \"<{} DatasetWriter name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\nclass BufferedDatasetWriter(BufferedDatasetWriterBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"Maintains data and metadata in a buffer, writing to disk or\n network only when `close()` is called.\n\n This allows incremental updates to datasets using formats that don't\n otherwise support updates, such as JPEG.\n \"\"\"\n\n def __repr__(self):\n return \"<{} BufferedDatasetWriter name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n\nclass MemoryFile(MemoryFileBase):\n \"\"\"A BytesIO-like object, backed by an in-memory file.\n\n This allows formatted files to be read and written without I/O.\n\n A MemoryFile created with initial bytes becomes immutable. A\n MemoryFile created without initial bytes may be written to using\n either file-like or dataset interfaces.\n\n Examples\n --------\n\n A GeoTIFF can be loaded in memory and accessed using the GeoTIFF\n format driver\n\n >>> with open('tests/data/RGB.byte.tif', 'rb') as f, \\\n ... MemoryFile(f.read()) as memfile:\n ... with memfile.open() as src:\n ... pprint.pprint(src.profile)\n ...\n {'count': 3,\n 'crs': CRS({'init': 'epsg:32618'}),\n 'driver': 'GTiff',\n 'dtype': 'uint8',\n 'height': 718,\n 'interleave': 'pixel',\n 'nodata': 0.0,\n 'tiled': False,\n 'transform': Affine(300.0379266750948, 0.0, 101985.0,\n 0.0, -300.041782729805, 2826915.0),\n 'width': 791}\n\n \"\"\"\n\n def open(self, driver=None, width=None, height=None,\n count=None, crs=None, transform=None, dtype=None, nodata=None,\n **kwargs):\n \"\"\"Open the file and return a Rasterio dataset object.\n\n If data has already been written, the file is opened in 'r+'\n mode. Otherwise, the file is opened in 'w' mode.\n \"\"\"\n if self.closed:\n raise IOError(\"I/O operation on closed file.\")\n if self.exists():\n s = get_writer_for_path(self.name)(self.name, 'r+')\n else:\n s = get_writer_for_driver(driver)(self.name, 'w', driver=driver,\n width=width, height=height,\n count=count, crs=crs,\n transform=transform, dtype=dtype,\n nodata=nodata, **kwargs)\n s.start()\n return s\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args, **kwargs):\n self.close()\n\n\ndef get_writer_for_driver(driver):\n \"\"\"Return the writer class appropriate for the specified driver.\"\"\"\n cls = None\n if driver_can_create(driver):\n cls = DatasetWriter\n elif driver_can_create_copy(driver): # pragma: no branch\n cls = BufferedDatasetWriter\n return cls\n\n\ndef get_writer_for_path(path):\n \"\"\"Return the writer class appropriate for the existing dataset.\"\"\"\n driver = get_dataset_driver(path)\n return get_writer_for_driver(driver)\n", "path": "rasterio/io.py"}]}
2,584
923
gh_patches_debug_14729
rasdani/github-patches
git_diff
dotkom__onlineweb4-612
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Gravatar default images do not load correctly --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `apps/authentication/templatetags/gravatar_url_resolver.py` Content: ``` 1 from django import template 2 from django.conf import settings 3 import urllib, hashlib 4 5 register = template.Library() 6 7 @register.assignment_tag(takes_context=True) 8 def gravatar_url(context, user, size): 9 default = "%s%s_%s.png" % (context['request'].META['HTTP_HOST'], 10 settings.DEFAULT_PROFILE_PICTURE_PREFIX, user.gender) 11 gravatar_url = "https://www.gravatar.com/avatar/" + hashlib.md5(user.get_email().email).hexdigest() + "?" 12 gravatar_url += urllib.urlencode({'d': default, 's':str(size)}) 13 14 return gravatar_url ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/apps/authentication/templatetags/gravatar_url_resolver.py b/apps/authentication/templatetags/gravatar_url_resolver.py --- a/apps/authentication/templatetags/gravatar_url_resolver.py +++ b/apps/authentication/templatetags/gravatar_url_resolver.py @@ -6,8 +6,11 @@ @register.assignment_tag(takes_context=True) def gravatar_url(context, user, size): - default = "%s%s_%s.png" % (context['request'].META['HTTP_HOST'], + + prefix = "https://" if context['request'].is_secure() else "http://" + default = "%s%s%s_%s.png" % (prefix, context['request'].META['HTTP_HOST'], settings.DEFAULT_PROFILE_PICTURE_PREFIX, user.gender) + gravatar_url = "https://www.gravatar.com/avatar/" + hashlib.md5(user.get_email().email).hexdigest() + "?" gravatar_url += urllib.urlencode({'d': default, 's':str(size)})
{"golden_diff": "diff --git a/apps/authentication/templatetags/gravatar_url_resolver.py b/apps/authentication/templatetags/gravatar_url_resolver.py\n--- a/apps/authentication/templatetags/gravatar_url_resolver.py\n+++ b/apps/authentication/templatetags/gravatar_url_resolver.py\n@@ -6,8 +6,11 @@\n \n @register.assignment_tag(takes_context=True)\n def gravatar_url(context, user, size):\n- default = \"%s%s_%s.png\" % (context['request'].META['HTTP_HOST'],\n+\n+ prefix = \"https://\" if context['request'].is_secure() else \"http://\"\n+ default = \"%s%s%s_%s.png\" % (prefix, context['request'].META['HTTP_HOST'],\n settings.DEFAULT_PROFILE_PICTURE_PREFIX, user.gender)\n+\n gravatar_url = \"https://www.gravatar.com/avatar/\" + hashlib.md5(user.get_email().email).hexdigest() + \"?\"\n gravatar_url += urllib.urlencode({'d': default, 's':str(size)})\n", "issue": "Gravatar default images do not load correctly\n\n", "before_files": [{"content": "from django import template\nfrom django.conf import settings\nimport urllib, hashlib\n \nregister = template.Library()\n\[email protected]_tag(takes_context=True)\ndef gravatar_url(context, user, size):\n default = \"%s%s_%s.png\" % (context['request'].META['HTTP_HOST'],\n settings.DEFAULT_PROFILE_PICTURE_PREFIX, user.gender)\n gravatar_url = \"https://www.gravatar.com/avatar/\" + hashlib.md5(user.get_email().email).hexdigest() + \"?\"\n gravatar_url += urllib.urlencode({'d': default, 's':str(size)})\n\n return gravatar_url", "path": "apps/authentication/templatetags/gravatar_url_resolver.py"}], "after_files": [{"content": "from django import template\nfrom django.conf import settings\nimport urllib, hashlib\n \nregister = template.Library()\n\[email protected]_tag(takes_context=True)\ndef gravatar_url(context, user, size):\n\n prefix = \"https://\" if context['request'].is_secure() else \"http://\"\n default = \"%s%s%s_%s.png\" % (prefix, context['request'].META['HTTP_HOST'],\n settings.DEFAULT_PROFILE_PICTURE_PREFIX, user.gender)\n\n gravatar_url = \"https://www.gravatar.com/avatar/\" + hashlib.md5(user.get_email().email).hexdigest() + \"?\"\n gravatar_url += urllib.urlencode({'d': default, 's':str(size)})\n\n return gravatar_url", "path": "apps/authentication/templatetags/gravatar_url_resolver.py"}]}
421
218
gh_patches_debug_11119
rasdani/github-patches
git_diff
microsoft__ptvsd-1853
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Deferred server events are not always propagated Handlers for "launch" and "attach" requests in the adapter don't lock the session correctly. As a result, there's a race condition between the code that replays server events after responding to those requests, and the code that queues up server events for replaying - and it's possible for an event to be deferred after events have already been replayed, which results in it never being propagated. "process" event is the one this affects most often, and when that happens, it breaks the debug session. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/ptvsd/adapter/ide.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. See LICENSE in the project root 3 # for license information. 4 5 from __future__ import absolute_import, print_function, unicode_literals 6 7 import platform 8 9 import ptvsd 10 from ptvsd.common import json, log, messaging 11 from ptvsd.common.compat import unicode 12 from ptvsd.adapter import components 13 14 15 class IDE(components.Component): 16 """Handles the IDE side of a debug session.""" 17 18 message_handler = components.Component.message_handler 19 20 class Capabilities(components.Capabilities): 21 PROPERTIES = { 22 "supportsVariableType": False, 23 "supportsVariablePaging": False, 24 "supportsRunInTerminalRequest": False, 25 "supportsMemoryReferences": False, 26 } 27 28 class Expectations(components.Capabilities): 29 PROPERTIES = { 30 "locale": "en-US", 31 "linesStartAt1": True, 32 "columnsStartAt1": True, 33 "pathFormat": json.enum("path"), # we don't support "uri" 34 } 35 36 def __init__(self, session, stream): 37 super(IDE, self).__init__(session, stream) 38 39 self.client_id = None 40 """ID of the connecting client. This can be 'test' while running tests.""" 41 42 self._initialize_request = None 43 """The "initialize" request as received from the IDE, to propagate to the 44 server later.""" 45 46 self._deferred_events = [] 47 """Deferred events from the launcher and the server that must be propagated 48 only if and when the "launch" or "attach" response is sent. 49 """ 50 51 assert not session.ide 52 session.ide = self 53 54 self.channel.send_event( 55 "output", 56 { 57 "category": "telemetry", 58 "output": "ptvsd.adapter", 59 "data": {"packageVersion": ptvsd.__version__}, 60 }, 61 ) 62 63 def propagate_after_start(self, event): 64 # pydevd starts sending events as soon as we connect, but the IDE doesn't 65 # expect to see any until it receives the response to "launch" or "attach" 66 # request. If IDE is not ready yet, save the event instead of propagating 67 # it immediately. 68 if self._deferred_events is not None: 69 self._deferred_events.append(event) 70 log.debug("Propagation deferred.") 71 else: 72 self.ide.channel.propagate(event) 73 74 def _propagate_deferred_events(self): 75 log.debug("Propagating deferred events to {0}...", self.ide) 76 for event in self._deferred_events: 77 log.debug("Propagating deferred {0}", event.describe()) 78 self.ide.channel.propagate(event) 79 log.info("All deferred events propagated to {0}.", self.ide) 80 self._deferred_events = None 81 82 # Generic event handler. There are no specific handlers for IDE events, because 83 # there are no events from the IDE in DAP - but we propagate them if we can, in 84 # case some events appear in future protocol versions. 85 @message_handler 86 def event(self, event): 87 if self.server: 88 self.server.channel.propagate(event) 89 90 # Generic request handler, used if there's no specific handler below. 91 @message_handler 92 def request(self, request): 93 return self.server.channel.delegate(request) 94 95 @message_handler 96 def initialize_request(self, request): 97 if self._initialize_request is not None: 98 raise request.isnt_valid("Session is already initialized") 99 100 self.client_id = request("clientID", "") 101 self.capabilities = self.Capabilities(self, request) 102 self.expectations = self.Expectations(self, request) 103 self._initialize_request = request 104 105 return { 106 "supportsCompletionsRequest": True, 107 "supportsConditionalBreakpoints": True, 108 "supportsConfigurationDoneRequest": True, 109 "supportsDebuggerProperties": True, 110 "supportsDelayedStackTraceLoading": True, 111 "supportsEvaluateForHovers": True, 112 "supportsExceptionInfoRequest": True, 113 "supportsExceptionOptions": True, 114 "supportsHitConditionalBreakpoints": True, 115 "supportsLogPoints": True, 116 "supportsModulesRequest": True, 117 "supportsSetExpression": True, 118 "supportsSetVariable": True, 119 "supportsValueFormattingOptions": True, 120 "supportsTerminateDebuggee": True, 121 "supportsGotoTargetsRequest": True, 122 "exceptionBreakpointFilters": [ 123 {"filter": "raised", "label": "Raised Exceptions", "default": False}, 124 {"filter": "uncaught", "label": "Uncaught Exceptions", "default": True}, 125 ], 126 } 127 128 # Common code for "launch" and "attach" request handlers. 129 # 130 # See https://github.com/microsoft/vscode/issues/4902#issuecomment-368583522 131 # for the sequence of request and events necessary to orchestrate the start. 132 def _start_message_handler(f): 133 f = components.Component.message_handler(f) 134 135 def handle(self, request): 136 assert request.is_request("launch", "attach") 137 if self._initialize_request is None: 138 raise request.isnt_valid("Session is not initialized yet") 139 if self.launcher: 140 raise request.isnt_valid("Session is already started") 141 142 self.session.no_debug = request("noDebug", json.default(False)) 143 self.session.debug_options = set( 144 request("debugOptions", json.array(unicode)) 145 ) 146 147 f(self, request) 148 149 if self.server: 150 self.server.initialize(self._initialize_request) 151 self._initialize_request = None 152 153 # pydevd doesn't send "initialized", and responds to the start request 154 # immediately, without waiting for "configurationDone". If it changes 155 # to conform to the DAP spec, we'll need to defer waiting for response. 156 self.server.channel.delegate(request) 157 158 if self.session.no_debug: 159 request.respond({}) 160 self._propagate_deferred_events() 161 return 162 163 if {"WindowsClient", "Windows"} & self.session.debug_options: 164 client_os_type = "WINDOWS" 165 elif {"UnixClient", "UNIX"} & self.session.debug_options: 166 client_os_type = "UNIX" 167 else: 168 client_os_type = "WINDOWS" if platform.system() == "Windows" else "UNIX" 169 self.server.channel.request( 170 "setDebuggerProperty", 171 { 172 "skipSuspendOnBreakpointException": ("BaseException",), 173 "skipPrintBreakpointException": ("NameError",), 174 "multiThreadsSingleNotification": True, 175 "ideOS": client_os_type, 176 }, 177 ) 178 179 # Let the IDE know that it can begin configuring the adapter. 180 self.channel.send_event("initialized") 181 182 self._start_request = request 183 return messaging.NO_RESPONSE # will respond on "configurationDone" 184 185 return handle 186 187 @_start_message_handler 188 def launch_request(self, request): 189 sudo = request("sudo", json.default("Sudo" in self.session.debug_options)) 190 if sudo: 191 if platform.system() == "Windows": 192 raise request.cant_handle('"sudo":true is not supported on Windows.') 193 else: 194 if "Sudo" in self.session.debug_options: 195 raise request.isnt_valid( 196 '"sudo":false and "debugOptions":["Sudo"] are mutually exclusive' 197 ) 198 199 # Launcher doesn't use the command line at all, but we pass the arguments so 200 # that they show up in the terminal if we're using "runInTerminal". 201 if "program" in request: 202 args = request("program", json.array(unicode, vectorize=True, size=(1,))) 203 elif "module" in request: 204 args = ["-m"] + request( 205 "module", json.array(unicode, vectorize=True, size=(1,)) 206 ) 207 elif "code" in request: 208 args = ["-c"] + request( 209 "code", json.array(unicode, vectorize=True, size=(1,)) 210 ) 211 else: 212 args = [] 213 args += request("args", json.array(unicode)) 214 215 console = request( 216 "console", 217 json.enum( 218 "internalConsole", 219 "integratedTerminal", 220 "externalTerminal", 221 optional=True, 222 ), 223 ) 224 console_title = request("consoleTitle", json.default("Python Debug Console")) 225 226 self.session.spawn_debuggee(request, sudo, args, console, console_title) 227 228 if "RedirectOutput" in self.session.debug_options: 229 # The launcher is doing output redirection, so we don't need the server. 230 request.arguments["debugOptions"].remove("RedirectOutput") 231 232 @_start_message_handler 233 def attach_request(self, request): 234 if self.session.no_debug: 235 raise request.isnt_valid('"noDebug" is not supported for "attach"') 236 237 pid = request("processId", int, optional=True) 238 if pid == (): 239 # When the adapter is spawned by the debug server, it is connected to the 240 # latter from the get go, and "host" and "port" in the "attach" request 241 # are actually the host and port on which the adapter itself was listening, 242 # so we can ignore those. 243 if self.server: 244 return 245 246 host = request("host", "127.0.0.1") 247 port = request("port", int) 248 if request("listen", False): 249 with self.accept_connection_from_server((host, port)): 250 pass 251 else: 252 self.session.connect_to_server((host, port)) 253 else: 254 if self.server: 255 raise request.isnt_valid( 256 '"attach" with "processId" cannot be serviced by adapter ' 257 "that is already associated with a debug server" 258 ) 259 260 ptvsd_args = request("ptvsdArgs", json.array(unicode)) 261 self.session.inject_server(pid, ptvsd_args) 262 263 @message_handler 264 def configurationDone_request(self, request): 265 if self._start_request is None: 266 request.cant_handle( 267 '"configurationDone" is only allowed during handling of a "launch" ' 268 'or an "attach" request' 269 ) 270 271 try: 272 request.respond(self.server.channel.delegate(request)) 273 finally: 274 self._start_request.respond({}) 275 self._start_request = None 276 self._propagate_deferred_events() 277 278 @message_handler 279 def pause_request(self, request): 280 request.arguments["threadId"] = "*" 281 return self.server.channel.delegate(request) 282 283 @message_handler 284 def continue_request(self, request): 285 request.arguments["threadId"] = "*" 286 287 try: 288 return self.server.channel.delegate(request) 289 except messaging.NoMoreMessages: 290 # pydevd can sometimes allow the debuggee to exit before the queued 291 # "continue" response gets sent. Thus, a failed "continue" response 292 # indicating that the server disconnected should be treated as success. 293 return {"allThreadsContinued": True} 294 295 @message_handler 296 def ptvsd_systemInfo_request(self, request): 297 result = {"ptvsd": {"version": ptvsd.__version__}} 298 if self.server: 299 try: 300 pydevd_info = self.server.channel.request("pydevdSystemInfo") 301 except Exception: 302 # If the server has already disconnected, or couldn't handle it, 303 # report what we've got. 304 pass 305 else: 306 result.update(pydevd_info) 307 return result 308 309 @message_handler 310 def terminate_request(self, request): 311 self.session.finalize('IDE requested "terminate"', terminate_debuggee=True) 312 return {} 313 314 @message_handler 315 def disconnect_request(self, request): 316 self.session.finalize( 317 'IDE requested "disconnect"', 318 request("terminateDebuggee", json.default(bool(self.launcher))), 319 ) 320 return {} 321 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/ptvsd/adapter/ide.py b/src/ptvsd/adapter/ide.py --- a/src/ptvsd/adapter/ide.py +++ b/src/ptvsd/adapter/ide.py @@ -130,8 +130,7 @@ # See https://github.com/microsoft/vscode/issues/4902#issuecomment-368583522 # for the sequence of request and events necessary to orchestrate the start. def _start_message_handler(f): - f = components.Component.message_handler(f) - + @components.Component.message_handler def handle(self, request): assert request.is_request("launch", "attach") if self._initialize_request is None:
{"golden_diff": "diff --git a/src/ptvsd/adapter/ide.py b/src/ptvsd/adapter/ide.py\n--- a/src/ptvsd/adapter/ide.py\n+++ b/src/ptvsd/adapter/ide.py\n@@ -130,8 +130,7 @@\n # See https://github.com/microsoft/vscode/issues/4902#issuecomment-368583522\n # for the sequence of request and events necessary to orchestrate the start.\n def _start_message_handler(f):\n- f = components.Component.message_handler(f)\n-\n+ @components.Component.message_handler\n def handle(self, request):\n assert request.is_request(\"launch\", \"attach\")\n if self._initialize_request is None:\n", "issue": "Deferred server events are not always propagated\nHandlers for \"launch\" and \"attach\" requests in the adapter don't lock the session correctly. As a result, there's a race condition between the code that replays server events after responding to those requests, and the code that queues up server events for replaying - and it's possible for an event to be deferred after events have already been replayed, which results in it never being propagated.\r\n\r\n\"process\" event is the one this affects most often, and when that happens, it breaks the debug session.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nimport platform\n\nimport ptvsd\nfrom ptvsd.common import json, log, messaging\nfrom ptvsd.common.compat import unicode\nfrom ptvsd.adapter import components\n\n\nclass IDE(components.Component):\n \"\"\"Handles the IDE side of a debug session.\"\"\"\n\n message_handler = components.Component.message_handler\n\n class Capabilities(components.Capabilities):\n PROPERTIES = {\n \"supportsVariableType\": False,\n \"supportsVariablePaging\": False,\n \"supportsRunInTerminalRequest\": False,\n \"supportsMemoryReferences\": False,\n }\n\n class Expectations(components.Capabilities):\n PROPERTIES = {\n \"locale\": \"en-US\",\n \"linesStartAt1\": True,\n \"columnsStartAt1\": True,\n \"pathFormat\": json.enum(\"path\"), # we don't support \"uri\"\n }\n\n def __init__(self, session, stream):\n super(IDE, self).__init__(session, stream)\n\n self.client_id = None\n \"\"\"ID of the connecting client. This can be 'test' while running tests.\"\"\"\n\n self._initialize_request = None\n \"\"\"The \"initialize\" request as received from the IDE, to propagate to the\n server later.\"\"\"\n\n self._deferred_events = []\n \"\"\"Deferred events from the launcher and the server that must be propagated\n only if and when the \"launch\" or \"attach\" response is sent.\n \"\"\"\n\n assert not session.ide\n session.ide = self\n\n self.channel.send_event(\n \"output\",\n {\n \"category\": \"telemetry\",\n \"output\": \"ptvsd.adapter\",\n \"data\": {\"packageVersion\": ptvsd.__version__},\n },\n )\n\n def propagate_after_start(self, event):\n # pydevd starts sending events as soon as we connect, but the IDE doesn't\n # expect to see any until it receives the response to \"launch\" or \"attach\"\n # request. If IDE is not ready yet, save the event instead of propagating\n # it immediately.\n if self._deferred_events is not None:\n self._deferred_events.append(event)\n log.debug(\"Propagation deferred.\")\n else:\n self.ide.channel.propagate(event)\n\n def _propagate_deferred_events(self):\n log.debug(\"Propagating deferred events to {0}...\", self.ide)\n for event in self._deferred_events:\n log.debug(\"Propagating deferred {0}\", event.describe())\n self.ide.channel.propagate(event)\n log.info(\"All deferred events propagated to {0}.\", self.ide)\n self._deferred_events = None\n\n # Generic event handler. There are no specific handlers for IDE events, because\n # there are no events from the IDE in DAP - but we propagate them if we can, in\n # case some events appear in future protocol versions.\n @message_handler\n def event(self, event):\n if self.server:\n self.server.channel.propagate(event)\n\n # Generic request handler, used if there's no specific handler below.\n @message_handler\n def request(self, request):\n return self.server.channel.delegate(request)\n\n @message_handler\n def initialize_request(self, request):\n if self._initialize_request is not None:\n raise request.isnt_valid(\"Session is already initialized\")\n\n self.client_id = request(\"clientID\", \"\")\n self.capabilities = self.Capabilities(self, request)\n self.expectations = self.Expectations(self, request)\n self._initialize_request = request\n\n return {\n \"supportsCompletionsRequest\": True,\n \"supportsConditionalBreakpoints\": True,\n \"supportsConfigurationDoneRequest\": True,\n \"supportsDebuggerProperties\": True,\n \"supportsDelayedStackTraceLoading\": True,\n \"supportsEvaluateForHovers\": True,\n \"supportsExceptionInfoRequest\": True,\n \"supportsExceptionOptions\": True,\n \"supportsHitConditionalBreakpoints\": True,\n \"supportsLogPoints\": True,\n \"supportsModulesRequest\": True,\n \"supportsSetExpression\": True,\n \"supportsSetVariable\": True,\n \"supportsValueFormattingOptions\": True,\n \"supportsTerminateDebuggee\": True,\n \"supportsGotoTargetsRequest\": True,\n \"exceptionBreakpointFilters\": [\n {\"filter\": \"raised\", \"label\": \"Raised Exceptions\", \"default\": False},\n {\"filter\": \"uncaught\", \"label\": \"Uncaught Exceptions\", \"default\": True},\n ],\n }\n\n # Common code for \"launch\" and \"attach\" request handlers.\n #\n # See https://github.com/microsoft/vscode/issues/4902#issuecomment-368583522\n # for the sequence of request and events necessary to orchestrate the start.\n def _start_message_handler(f):\n f = components.Component.message_handler(f)\n\n def handle(self, request):\n assert request.is_request(\"launch\", \"attach\")\n if self._initialize_request is None:\n raise request.isnt_valid(\"Session is not initialized yet\")\n if self.launcher:\n raise request.isnt_valid(\"Session is already started\")\n\n self.session.no_debug = request(\"noDebug\", json.default(False))\n self.session.debug_options = set(\n request(\"debugOptions\", json.array(unicode))\n )\n\n f(self, request)\n\n if self.server:\n self.server.initialize(self._initialize_request)\n self._initialize_request = None\n\n # pydevd doesn't send \"initialized\", and responds to the start request\n # immediately, without waiting for \"configurationDone\". If it changes\n # to conform to the DAP spec, we'll need to defer waiting for response.\n self.server.channel.delegate(request)\n\n if self.session.no_debug:\n request.respond({})\n self._propagate_deferred_events()\n return\n\n if {\"WindowsClient\", \"Windows\"} & self.session.debug_options:\n client_os_type = \"WINDOWS\"\n elif {\"UnixClient\", \"UNIX\"} & self.session.debug_options:\n client_os_type = \"UNIX\"\n else:\n client_os_type = \"WINDOWS\" if platform.system() == \"Windows\" else \"UNIX\"\n self.server.channel.request(\n \"setDebuggerProperty\",\n {\n \"skipSuspendOnBreakpointException\": (\"BaseException\",),\n \"skipPrintBreakpointException\": (\"NameError\",),\n \"multiThreadsSingleNotification\": True,\n \"ideOS\": client_os_type,\n },\n )\n\n # Let the IDE know that it can begin configuring the adapter.\n self.channel.send_event(\"initialized\")\n\n self._start_request = request\n return messaging.NO_RESPONSE # will respond on \"configurationDone\"\n\n return handle\n\n @_start_message_handler\n def launch_request(self, request):\n sudo = request(\"sudo\", json.default(\"Sudo\" in self.session.debug_options))\n if sudo:\n if platform.system() == \"Windows\":\n raise request.cant_handle('\"sudo\":true is not supported on Windows.')\n else:\n if \"Sudo\" in self.session.debug_options:\n raise request.isnt_valid(\n '\"sudo\":false and \"debugOptions\":[\"Sudo\"] are mutually exclusive'\n )\n\n # Launcher doesn't use the command line at all, but we pass the arguments so\n # that they show up in the terminal if we're using \"runInTerminal\".\n if \"program\" in request:\n args = request(\"program\", json.array(unicode, vectorize=True, size=(1,)))\n elif \"module\" in request:\n args = [\"-m\"] + request(\n \"module\", json.array(unicode, vectorize=True, size=(1,))\n )\n elif \"code\" in request:\n args = [\"-c\"] + request(\n \"code\", json.array(unicode, vectorize=True, size=(1,))\n )\n else:\n args = []\n args += request(\"args\", json.array(unicode))\n\n console = request(\n \"console\",\n json.enum(\n \"internalConsole\",\n \"integratedTerminal\",\n \"externalTerminal\",\n optional=True,\n ),\n )\n console_title = request(\"consoleTitle\", json.default(\"Python Debug Console\"))\n\n self.session.spawn_debuggee(request, sudo, args, console, console_title)\n\n if \"RedirectOutput\" in self.session.debug_options:\n # The launcher is doing output redirection, so we don't need the server.\n request.arguments[\"debugOptions\"].remove(\"RedirectOutput\")\n\n @_start_message_handler\n def attach_request(self, request):\n if self.session.no_debug:\n raise request.isnt_valid('\"noDebug\" is not supported for \"attach\"')\n\n pid = request(\"processId\", int, optional=True)\n if pid == ():\n # When the adapter is spawned by the debug server, it is connected to the\n # latter from the get go, and \"host\" and \"port\" in the \"attach\" request\n # are actually the host and port on which the adapter itself was listening,\n # so we can ignore those.\n if self.server:\n return\n\n host = request(\"host\", \"127.0.0.1\")\n port = request(\"port\", int)\n if request(\"listen\", False):\n with self.accept_connection_from_server((host, port)):\n pass\n else:\n self.session.connect_to_server((host, port))\n else:\n if self.server:\n raise request.isnt_valid(\n '\"attach\" with \"processId\" cannot be serviced by adapter '\n \"that is already associated with a debug server\"\n )\n\n ptvsd_args = request(\"ptvsdArgs\", json.array(unicode))\n self.session.inject_server(pid, ptvsd_args)\n\n @message_handler\n def configurationDone_request(self, request):\n if self._start_request is None:\n request.cant_handle(\n '\"configurationDone\" is only allowed during handling of a \"launch\" '\n 'or an \"attach\" request'\n )\n\n try:\n request.respond(self.server.channel.delegate(request))\n finally:\n self._start_request.respond({})\n self._start_request = None\n self._propagate_deferred_events()\n\n @message_handler\n def pause_request(self, request):\n request.arguments[\"threadId\"] = \"*\"\n return self.server.channel.delegate(request)\n\n @message_handler\n def continue_request(self, request):\n request.arguments[\"threadId\"] = \"*\"\n\n try:\n return self.server.channel.delegate(request)\n except messaging.NoMoreMessages:\n # pydevd can sometimes allow the debuggee to exit before the queued\n # \"continue\" response gets sent. Thus, a failed \"continue\" response\n # indicating that the server disconnected should be treated as success.\n return {\"allThreadsContinued\": True}\n\n @message_handler\n def ptvsd_systemInfo_request(self, request):\n result = {\"ptvsd\": {\"version\": ptvsd.__version__}}\n if self.server:\n try:\n pydevd_info = self.server.channel.request(\"pydevdSystemInfo\")\n except Exception:\n # If the server has already disconnected, or couldn't handle it,\n # report what we've got.\n pass\n else:\n result.update(pydevd_info)\n return result\n\n @message_handler\n def terminate_request(self, request):\n self.session.finalize('IDE requested \"terminate\"', terminate_debuggee=True)\n return {}\n\n @message_handler\n def disconnect_request(self, request):\n self.session.finalize(\n 'IDE requested \"disconnect\"',\n request(\"terminateDebuggee\", json.default(bool(self.launcher))),\n )\n return {}\n", "path": "src/ptvsd/adapter/ide.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nimport platform\n\nimport ptvsd\nfrom ptvsd.common import json, log, messaging\nfrom ptvsd.common.compat import unicode\nfrom ptvsd.adapter import components\n\n\nclass IDE(components.Component):\n \"\"\"Handles the IDE side of a debug session.\"\"\"\n\n message_handler = components.Component.message_handler\n\n class Capabilities(components.Capabilities):\n PROPERTIES = {\n \"supportsVariableType\": False,\n \"supportsVariablePaging\": False,\n \"supportsRunInTerminalRequest\": False,\n \"supportsMemoryReferences\": False,\n }\n\n class Expectations(components.Capabilities):\n PROPERTIES = {\n \"locale\": \"en-US\",\n \"linesStartAt1\": True,\n \"columnsStartAt1\": True,\n \"pathFormat\": json.enum(\"path\"), # we don't support \"uri\"\n }\n\n def __init__(self, session, stream):\n super(IDE, self).__init__(session, stream)\n\n self.client_id = None\n \"\"\"ID of the connecting client. This can be 'test' while running tests.\"\"\"\n\n self._initialize_request = None\n \"\"\"The \"initialize\" request as received from the IDE, to propagate to the\n server later.\"\"\"\n\n self._deferred_events = []\n \"\"\"Deferred events from the launcher and the server that must be propagated\n only if and when the \"launch\" or \"attach\" response is sent.\n \"\"\"\n\n assert not session.ide\n session.ide = self\n\n self.channel.send_event(\n \"output\",\n {\n \"category\": \"telemetry\",\n \"output\": \"ptvsd.adapter\",\n \"data\": {\"packageVersion\": ptvsd.__version__},\n },\n )\n\n def propagate_after_start(self, event):\n # pydevd starts sending events as soon as we connect, but the IDE doesn't\n # expect to see any until it receives the response to \"launch\" or \"attach\"\n # request. If IDE is not ready yet, save the event instead of propagating\n # it immediately.\n if self._deferred_events is not None:\n self._deferred_events.append(event)\n log.debug(\"Propagation deferred.\")\n else:\n self.ide.channel.propagate(event)\n\n def _propagate_deferred_events(self):\n log.debug(\"Propagating deferred events to {0}...\", self.ide)\n for event in self._deferred_events:\n log.debug(\"Propagating deferred {0}\", event.describe())\n self.ide.channel.propagate(event)\n log.info(\"All deferred events propagated to {0}.\", self.ide)\n self._deferred_events = None\n\n # Generic event handler. There are no specific handlers for IDE events, because\n # there are no events from the IDE in DAP - but we propagate them if we can, in\n # case some events appear in future protocol versions.\n @message_handler\n def event(self, event):\n if self.server:\n self.server.channel.propagate(event)\n\n # Generic request handler, used if there's no specific handler below.\n @message_handler\n def request(self, request):\n return self.server.channel.delegate(request)\n\n @message_handler\n def initialize_request(self, request):\n if self._initialize_request is not None:\n raise request.isnt_valid(\"Session is already initialized\")\n\n self.client_id = request(\"clientID\", \"\")\n self.capabilities = self.Capabilities(self, request)\n self.expectations = self.Expectations(self, request)\n self._initialize_request = request\n\n return {\n \"supportsCompletionsRequest\": True,\n \"supportsConditionalBreakpoints\": True,\n \"supportsConfigurationDoneRequest\": True,\n \"supportsDebuggerProperties\": True,\n \"supportsDelayedStackTraceLoading\": True,\n \"supportsEvaluateForHovers\": True,\n \"supportsExceptionInfoRequest\": True,\n \"supportsExceptionOptions\": True,\n \"supportsHitConditionalBreakpoints\": True,\n \"supportsLogPoints\": True,\n \"supportsModulesRequest\": True,\n \"supportsSetExpression\": True,\n \"supportsSetVariable\": True,\n \"supportsValueFormattingOptions\": True,\n \"supportsTerminateDebuggee\": True,\n \"supportsGotoTargetsRequest\": True,\n \"exceptionBreakpointFilters\": [\n {\"filter\": \"raised\", \"label\": \"Raised Exceptions\", \"default\": False},\n {\"filter\": \"uncaught\", \"label\": \"Uncaught Exceptions\", \"default\": True},\n ],\n }\n\n # Common code for \"launch\" and \"attach\" request handlers.\n #\n # See https://github.com/microsoft/vscode/issues/4902#issuecomment-368583522\n # for the sequence of request and events necessary to orchestrate the start.\n def _start_message_handler(f):\n @components.Component.message_handler\n def handle(self, request):\n assert request.is_request(\"launch\", \"attach\")\n if self._initialize_request is None:\n raise request.isnt_valid(\"Session is not initialized yet\")\n if self.launcher:\n raise request.isnt_valid(\"Session is already started\")\n\n self.session.no_debug = request(\"noDebug\", json.default(False))\n self.session.debug_options = set(\n request(\"debugOptions\", json.array(unicode))\n )\n\n f(self, request)\n\n if self.server:\n self.server.initialize(self._initialize_request)\n self._initialize_request = None\n\n # pydevd doesn't send \"initialized\", and responds to the start request\n # immediately, without waiting for \"configurationDone\". If it changes\n # to conform to the DAP spec, we'll need to defer waiting for response.\n self.server.channel.delegate(request)\n\n if self.session.no_debug:\n request.respond({})\n self._propagate_deferred_events()\n return\n\n if {\"WindowsClient\", \"Windows\"} & self.session.debug_options:\n client_os_type = \"WINDOWS\"\n elif {\"UnixClient\", \"UNIX\"} & self.session.debug_options:\n client_os_type = \"UNIX\"\n else:\n client_os_type = \"WINDOWS\" if platform.system() == \"Windows\" else \"UNIX\"\n self.server.channel.request(\n \"setDebuggerProperty\",\n {\n \"skipSuspendOnBreakpointException\": (\"BaseException\",),\n \"skipPrintBreakpointException\": (\"NameError\",),\n \"multiThreadsSingleNotification\": True,\n \"ideOS\": client_os_type,\n },\n )\n\n # Let the IDE know that it can begin configuring the adapter.\n self.channel.send_event(\"initialized\")\n\n self._start_request = request\n return messaging.NO_RESPONSE # will respond on \"configurationDone\"\n\n return handle\n\n @_start_message_handler\n def launch_request(self, request):\n sudo = request(\"sudo\", json.default(\"Sudo\" in self.session.debug_options))\n if sudo:\n if platform.system() == \"Windows\":\n raise request.cant_handle('\"sudo\":true is not supported on Windows.')\n else:\n if \"Sudo\" in self.session.debug_options:\n raise request.isnt_valid(\n '\"sudo\":false and \"debugOptions\":[\"Sudo\"] are mutually exclusive'\n )\n\n # Launcher doesn't use the command line at all, but we pass the arguments so\n # that they show up in the terminal if we're using \"runInTerminal\".\n if \"program\" in request:\n args = request(\"program\", json.array(unicode, vectorize=True, size=(1,)))\n elif \"module\" in request:\n args = [\"-m\"] + request(\n \"module\", json.array(unicode, vectorize=True, size=(1,))\n )\n elif \"code\" in request:\n args = [\"-c\"] + request(\n \"code\", json.array(unicode, vectorize=True, size=(1,))\n )\n else:\n args = []\n args += request(\"args\", json.array(unicode))\n\n console = request(\n \"console\",\n json.enum(\n \"internalConsole\",\n \"integratedTerminal\",\n \"externalTerminal\",\n optional=True,\n ),\n )\n console_title = request(\"consoleTitle\", json.default(\"Python Debug Console\"))\n\n self.session.spawn_debuggee(request, sudo, args, console, console_title)\n\n if \"RedirectOutput\" in self.session.debug_options:\n # The launcher is doing output redirection, so we don't need the server.\n request.arguments[\"debugOptions\"].remove(\"RedirectOutput\")\n\n @_start_message_handler\n def attach_request(self, request):\n if self.session.no_debug:\n raise request.isnt_valid('\"noDebug\" is not supported for \"attach\"')\n\n pid = request(\"processId\", int, optional=True)\n if pid == ():\n # When the adapter is spawned by the debug server, it is connected to the\n # latter from the get go, and \"host\" and \"port\" in the \"attach\" request\n # are actually the host and port on which the adapter itself was listening,\n # so we can ignore those.\n if self.server:\n return\n\n host = request(\"host\", \"127.0.0.1\")\n port = request(\"port\", int)\n if request(\"listen\", False):\n with self.accept_connection_from_server((host, port)):\n pass\n else:\n self.session.connect_to_server((host, port))\n else:\n if self.server:\n raise request.isnt_valid(\n '\"attach\" with \"processId\" cannot be serviced by adapter '\n \"that is already associated with a debug server\"\n )\n\n ptvsd_args = request(\"ptvsdArgs\", json.array(unicode))\n self.session.inject_server(pid, ptvsd_args)\n\n @message_handler\n def configurationDone_request(self, request):\n if self._start_request is None:\n request.cant_handle(\n '\"configurationDone\" is only allowed during handling of a \"launch\" '\n 'or an \"attach\" request'\n )\n\n try:\n request.respond(self.server.channel.delegate(request))\n finally:\n self._start_request.respond({})\n self._start_request = None\n self._propagate_deferred_events()\n\n @message_handler\n def pause_request(self, request):\n request.arguments[\"threadId\"] = \"*\"\n return self.server.channel.delegate(request)\n\n @message_handler\n def continue_request(self, request):\n request.arguments[\"threadId\"] = \"*\"\n\n try:\n return self.server.channel.delegate(request)\n except messaging.NoMoreMessages:\n # pydevd can sometimes allow the debuggee to exit before the queued\n # \"continue\" response gets sent. Thus, a failed \"continue\" response\n # indicating that the server disconnected should be treated as success.\n return {\"allThreadsContinued\": True}\n\n @message_handler\n def ptvsd_systemInfo_request(self, request):\n result = {\"ptvsd\": {\"version\": ptvsd.__version__}}\n if self.server:\n try:\n pydevd_info = self.server.channel.request(\"pydevdSystemInfo\")\n except Exception:\n # If the server has already disconnected, or couldn't handle it,\n # report what we've got.\n pass\n else:\n result.update(pydevd_info)\n return result\n\n @message_handler\n def terminate_request(self, request):\n self.session.finalize('IDE requested \"terminate\"', terminate_debuggee=True)\n return {}\n\n @message_handler\n def disconnect_request(self, request):\n self.session.finalize(\n 'IDE requested \"disconnect\"',\n request(\"terminateDebuggee\", json.default(bool(self.launcher))),\n )\n return {}\n", "path": "src/ptvsd/adapter/ide.py"}]}
3,777
168
gh_patches_debug_17320
rasdani/github-patches
git_diff
sql-machine-learning__elasticdl-1324
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Only keep necessary dependencies in requirements.txt Currently, we have one single `requirements.txt` file which contains some unnecessary dependencies used for testing only such as `pytest`, `pytest-cov`, and `mock`. They should not be part of `requirements.txt` and should not be distributed with `elasticdl` Python package. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 from setuptools import find_packages, setup 2 3 with open("elasticdl/requirements.txt") as f: 4 requirements = f.read().splitlines() 5 6 setup( 7 name="elasticdl", 8 version="develop", 9 description="A Kubernetes-native Deep Learning Framework", 10 long_description="ElasticDL is a Kubernetes-native deep learning framework" 11 " built on top of TensorFlow 2.0 that supports" 12 " fault-tolerance and elastic scheduling.", 13 long_description_content_type="text/markdown", 14 author="Ant Financial", 15 url="https://elasticdl.org", 16 install_requires=requirements, 17 packages=find_packages(exclude=["*test*"]), 18 package_data={"": ["proto/elasticdl.proto", "docker/*", "Makefile"]}, 19 entry_points={ 20 "console_scripts": ["elasticdl=elasticdl.python.elasticdl.client:main"] 21 }, 22 ) 23 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -1,7 +1,11 @@ from setuptools import find_packages, setup with open("elasticdl/requirements.txt") as f: - requirements = f.read().splitlines() + required_deps = f.read().splitlines() + +extras = {} +with open("elasticdl/requirements-dev.txt") as f: + extras["develop"] = f.read().splitlines() setup( name="elasticdl", @@ -13,7 +17,8 @@ long_description_content_type="text/markdown", author="Ant Financial", url="https://elasticdl.org", - install_requires=requirements, + install_requires=required_deps, + extras_require=extras, packages=find_packages(exclude=["*test*"]), package_data={"": ["proto/elasticdl.proto", "docker/*", "Makefile"]}, entry_points={
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,7 +1,11 @@\n from setuptools import find_packages, setup\n \n with open(\"elasticdl/requirements.txt\") as f:\n- requirements = f.read().splitlines()\n+ required_deps = f.read().splitlines()\n+\n+extras = {}\n+with open(\"elasticdl/requirements-dev.txt\") as f:\n+ extras[\"develop\"] = f.read().splitlines()\n \n setup(\n name=\"elasticdl\",\n@@ -13,7 +17,8 @@\n long_description_content_type=\"text/markdown\",\n author=\"Ant Financial\",\n url=\"https://elasticdl.org\",\n- install_requires=requirements,\n+ install_requires=required_deps,\n+ extras_require=extras,\n packages=find_packages(exclude=[\"*test*\"]),\n package_data={\"\": [\"proto/elasticdl.proto\", \"docker/*\", \"Makefile\"]},\n entry_points={\n", "issue": "Only keep necessary dependencies in requirements.txt\nCurrently, we have one single `requirements.txt` file which contains some unnecessary dependencies used for testing only such as `pytest`, `pytest-cov`, and `mock`. They should not be part of `requirements.txt` and should not be distributed with `elasticdl` Python package.\n", "before_files": [{"content": "from setuptools import find_packages, setup\n\nwith open(\"elasticdl/requirements.txt\") as f:\n requirements = f.read().splitlines()\n\nsetup(\n name=\"elasticdl\",\n version=\"develop\",\n description=\"A Kubernetes-native Deep Learning Framework\",\n long_description=\"ElasticDL is a Kubernetes-native deep learning framework\"\n \" built on top of TensorFlow 2.0 that supports\"\n \" fault-tolerance and elastic scheduling.\",\n long_description_content_type=\"text/markdown\",\n author=\"Ant Financial\",\n url=\"https://elasticdl.org\",\n install_requires=requirements,\n packages=find_packages(exclude=[\"*test*\"]),\n package_data={\"\": [\"proto/elasticdl.proto\", \"docker/*\", \"Makefile\"]},\n entry_points={\n \"console_scripts\": [\"elasticdl=elasticdl.python.elasticdl.client:main\"]\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import find_packages, setup\n\nwith open(\"elasticdl/requirements.txt\") as f:\n required_deps = f.read().splitlines()\n\nextras = {}\nwith open(\"elasticdl/requirements-dev.txt\") as f:\n extras[\"develop\"] = f.read().splitlines()\n\nsetup(\n name=\"elasticdl\",\n version=\"develop\",\n description=\"A Kubernetes-native Deep Learning Framework\",\n long_description=\"ElasticDL is a Kubernetes-native deep learning framework\"\n \" built on top of TensorFlow 2.0 that supports\"\n \" fault-tolerance and elastic scheduling.\",\n long_description_content_type=\"text/markdown\",\n author=\"Ant Financial\",\n url=\"https://elasticdl.org\",\n install_requires=required_deps,\n extras_require=extras,\n packages=find_packages(exclude=[\"*test*\"]),\n package_data={\"\": [\"proto/elasticdl.proto\", \"docker/*\", \"Makefile\"]},\n entry_points={\n \"console_scripts\": [\"elasticdl=elasticdl.python.elasticdl.client:main\"]\n },\n)\n", "path": "setup.py"}]}
539
208
gh_patches_debug_13147
rasdani/github-patches
git_diff
google__jax-399
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- float16 support Add support for `np.float16`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `jax/abstract_arrays.py` Content: ``` 1 # Copyright 2018 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from __future__ import absolute_import 16 from __future__ import division 17 from __future__ import print_function 18 19 import numpy as onp 20 import six 21 22 from . import core 23 from . import ad_util 24 from . util import prod 25 from .lib import xla_bridge 26 27 28 def concretization_err_msg(fun): 29 fname = getattr(fun, "__name__", fun) 30 msg = ("Abstract value passed to `{}`, which requires a concrete value. " 31 "The function to be transformed can't be traced at the required level " 32 "of abstraction. If using `jit`, try using `static_argnums` or " 33 "applying `jit` to smaller subfunctions instead.") 34 return msg.format(fname) 35 36 def concretization_function_error(fun): 37 def error(self, *args): 38 raise TypeError(concretization_err_msg(fun)) 39 return error 40 41 42 class UnshapedArray(core.AbstractValue): 43 __slots__ = ['dtype'] 44 array_abstraction_level = 3 45 46 def __init__(self, dtype): 47 self.dtype = dtype 48 49 def __eq__(self, other): 50 return type(self) is type(other) and self.dtype == other.dtype 51 52 def __hash__(self): 53 return hash(str(self.dtype)) 54 55 def __repr__(self): 56 return '{}({})'.format(self.__class__.__name__, self.str_short()) 57 58 _bool = _nonzero = concretization_function_error(bool) 59 _float = concretization_function_error(float) 60 _int = concretization_function_error(int) 61 if six.PY2: 62 _long = concretization_function_error(long) # noqa: F821 63 _complex = concretization_function_error(complex) 64 _hex = concretization_function_error(hex) 65 _oct = concretization_function_error(oct) 66 67 def at_least_vspace(self): 68 return self 69 70 def join(self, other): 71 return self 72 73 def str_short(self): 74 return onp.dtype(self.dtype).name 75 76 77 class ShapedArray(UnshapedArray): 78 __slots__ = ['shape'] 79 array_abstraction_level = 2 80 81 def __init__(self, shape, dtype): 82 self.dtype = onp.dtype(xla_bridge.canonicalize_dtype(dtype)) 83 self.shape = shape 84 85 ndim = property(lambda self: len(self.shape)) 86 size = property(lambda self: prod(self.shape)) 87 88 def __eq__(self, other): 89 return (type(self) is type(other) 90 and self.dtype == other.dtype and self.shape == other.shape) 91 92 def __hash__(self): 93 return hash((self.shape, str(self.dtype))) 94 95 def at_least_vspace(self): 96 return self 97 98 def join(self, other): 99 if self.shape == other.shape and self.dtype == other.dtype: 100 return self 101 elif self.dtype == other.dtype: 102 return UnshapedArray(self.dtype) 103 else: 104 raise TypeError(other) 105 106 def str_short(self): 107 dtypestr = onp.dtype(self.dtype).name 108 shapestr = ','.join(map(str, self.shape)) 109 return '{}[{}]'.format(dtypestr, shapestr) 110 111 def __len__(self): 112 try: 113 return self.shape[0] 114 except IndexError: 115 raise TypeError("len() of unsized object") # same as numpy error 116 117 def _len(self, ignored_tracer): 118 return len(self) 119 120 121 class ConcreteArray(ShapedArray): 122 __slots__ = ['val'] 123 array_abstraction_level = 0 124 125 def __init__(self, val): 126 self.val = val 127 self.shape = onp.shape(val) 128 # canonicalized self.dtype doesn't necessarily match self.val 129 self.dtype = onp.dtype(xla_bridge.canonicalize_dtype(onp.result_type(val))) 130 assert self.dtype != onp.dtype('O') 131 132 def __eq__(self, other): 133 return (type(self) is type(other) and self.dtype == other.dtype 134 and self.shape == other.shape and onp.all(self.val == other.val)) 135 136 def __hash__(self): 137 return id(self.val) 138 139 def at_least_vspace(self): 140 return ShapedArray(self.shape, self.dtype) 141 142 def join(self, other): 143 if self == other: 144 return self 145 elif self.shape == other.shape and self.dtype == other.dtype: 146 return ShapedArray(self.shape, self.dtype) 147 elif self.dtype == other.dtype: 148 return UnshapedArray(self.dtype) 149 else: 150 raise TypeError(other) 151 152 def str_short(self): 153 return str(self.val) 154 155 156 def make_shaped_array(x): 157 dtype = xla_bridge.canonicalize_dtype(onp.result_type(x)) 158 return ShapedArray(onp.shape(x), dtype) 159 160 def zeros_like_array(x): 161 dtype = xla_bridge.canonicalize_dtype(onp.result_type(x)) 162 return onp.broadcast_to(onp.array(0, dtype), onp.shape(x)) 163 164 array_types = [onp.ndarray, onp.float64, onp.float32, onp.complex64, 165 onp.complex128, onp.int64, onp.int32, onp.bool_, onp.uint64, 166 onp.uint32, complex, float, int, bool] 167 168 for t in array_types: 169 core.pytype_aval_mappings[t] = ConcreteArray 170 ad_util.jaxval_zeros_likers[t] = zeros_like_array 171 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/jax/abstract_arrays.py b/jax/abstract_arrays.py --- a/jax/abstract_arrays.py +++ b/jax/abstract_arrays.py @@ -161,9 +161,11 @@ dtype = xla_bridge.canonicalize_dtype(onp.result_type(x)) return onp.broadcast_to(onp.array(0, dtype), onp.shape(x)) -array_types = [onp.ndarray, onp.float64, onp.float32, onp.complex64, - onp.complex128, onp.int64, onp.int32, onp.bool_, onp.uint64, - onp.uint32, complex, float, int, bool] +array_types = [onp.ndarray, onp.float64, onp.float32, onp.float16, + onp.complex64, onp.complex128, + onp.int64, onp.int32, onp.int16, onp.int8, + onp.bool_, onp.uint64, onp.uint32, onp.uint16, onp.uint8, + complex, float, int, bool] for t in array_types: core.pytype_aval_mappings[t] = ConcreteArray
{"golden_diff": "diff --git a/jax/abstract_arrays.py b/jax/abstract_arrays.py\n--- a/jax/abstract_arrays.py\n+++ b/jax/abstract_arrays.py\n@@ -161,9 +161,11 @@\n dtype = xla_bridge.canonicalize_dtype(onp.result_type(x))\n return onp.broadcast_to(onp.array(0, dtype), onp.shape(x))\n \n-array_types = [onp.ndarray, onp.float64, onp.float32, onp.complex64,\n- onp.complex128, onp.int64, onp.int32, onp.bool_, onp.uint64,\n- onp.uint32, complex, float, int, bool]\n+array_types = [onp.ndarray, onp.float64, onp.float32, onp.float16,\n+ onp.complex64, onp.complex128,\n+ onp.int64, onp.int32, onp.int16, onp.int8,\n+ onp.bool_, onp.uint64, onp.uint32, onp.uint16, onp.uint8,\n+ complex, float, int, bool]\n \n for t in array_types:\n core.pytype_aval_mappings[t] = ConcreteArray\n", "issue": "float16 support\nAdd support for `np.float16`.\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as onp\nimport six\n\nfrom . import core\nfrom . import ad_util\nfrom . util import prod\nfrom .lib import xla_bridge\n\n\ndef concretization_err_msg(fun):\n fname = getattr(fun, \"__name__\", fun)\n msg = (\"Abstract value passed to `{}`, which requires a concrete value. \"\n \"The function to be transformed can't be traced at the required level \"\n \"of abstraction. If using `jit`, try using `static_argnums` or \"\n \"applying `jit` to smaller subfunctions instead.\")\n return msg.format(fname)\n\ndef concretization_function_error(fun):\n def error(self, *args):\n raise TypeError(concretization_err_msg(fun))\n return error\n\n\nclass UnshapedArray(core.AbstractValue):\n __slots__ = ['dtype']\n array_abstraction_level = 3\n\n def __init__(self, dtype):\n self.dtype = dtype\n\n def __eq__(self, other):\n return type(self) is type(other) and self.dtype == other.dtype\n\n def __hash__(self):\n return hash(str(self.dtype))\n\n def __repr__(self):\n return '{}({})'.format(self.__class__.__name__, self.str_short())\n\n _bool = _nonzero = concretization_function_error(bool)\n _float = concretization_function_error(float)\n _int = concretization_function_error(int)\n if six.PY2:\n _long = concretization_function_error(long) # noqa: F821\n _complex = concretization_function_error(complex)\n _hex = concretization_function_error(hex)\n _oct = concretization_function_error(oct)\n\n def at_least_vspace(self):\n return self\n\n def join(self, other):\n return self\n\n def str_short(self):\n return onp.dtype(self.dtype).name\n\n\nclass ShapedArray(UnshapedArray):\n __slots__ = ['shape']\n array_abstraction_level = 2\n\n def __init__(self, shape, dtype):\n self.dtype = onp.dtype(xla_bridge.canonicalize_dtype(dtype))\n self.shape = shape\n\n ndim = property(lambda self: len(self.shape))\n size = property(lambda self: prod(self.shape))\n\n def __eq__(self, other):\n return (type(self) is type(other)\n and self.dtype == other.dtype and self.shape == other.shape)\n\n def __hash__(self):\n return hash((self.shape, str(self.dtype)))\n\n def at_least_vspace(self):\n return self\n\n def join(self, other):\n if self.shape == other.shape and self.dtype == other.dtype:\n return self\n elif self.dtype == other.dtype:\n return UnshapedArray(self.dtype)\n else:\n raise TypeError(other)\n\n def str_short(self):\n dtypestr = onp.dtype(self.dtype).name\n shapestr = ','.join(map(str, self.shape))\n return '{}[{}]'.format(dtypestr, shapestr)\n\n def __len__(self):\n try:\n return self.shape[0]\n except IndexError:\n raise TypeError(\"len() of unsized object\") # same as numpy error\n\n def _len(self, ignored_tracer):\n return len(self)\n\n\nclass ConcreteArray(ShapedArray):\n __slots__ = ['val']\n array_abstraction_level = 0\n\n def __init__(self, val):\n self.val = val\n self.shape = onp.shape(val)\n # canonicalized self.dtype doesn't necessarily match self.val\n self.dtype = onp.dtype(xla_bridge.canonicalize_dtype(onp.result_type(val)))\n assert self.dtype != onp.dtype('O')\n\n def __eq__(self, other):\n return (type(self) is type(other) and self.dtype == other.dtype\n and self.shape == other.shape and onp.all(self.val == other.val))\n\n def __hash__(self):\n return id(self.val)\n\n def at_least_vspace(self):\n return ShapedArray(self.shape, self.dtype)\n\n def join(self, other):\n if self == other:\n return self\n elif self.shape == other.shape and self.dtype == other.dtype:\n return ShapedArray(self.shape, self.dtype)\n elif self.dtype == other.dtype:\n return UnshapedArray(self.dtype)\n else:\n raise TypeError(other)\n\n def str_short(self):\n return str(self.val)\n\n\ndef make_shaped_array(x):\n dtype = xla_bridge.canonicalize_dtype(onp.result_type(x))\n return ShapedArray(onp.shape(x), dtype)\n\ndef zeros_like_array(x):\n dtype = xla_bridge.canonicalize_dtype(onp.result_type(x))\n return onp.broadcast_to(onp.array(0, dtype), onp.shape(x))\n\narray_types = [onp.ndarray, onp.float64, onp.float32, onp.complex64,\n onp.complex128, onp.int64, onp.int32, onp.bool_, onp.uint64,\n onp.uint32, complex, float, int, bool]\n\nfor t in array_types:\n core.pytype_aval_mappings[t] = ConcreteArray\n ad_util.jaxval_zeros_likers[t] = zeros_like_array\n", "path": "jax/abstract_arrays.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as onp\nimport six\n\nfrom . import core\nfrom . import ad_util\nfrom . util import prod\nfrom .lib import xla_bridge\n\n\ndef concretization_err_msg(fun):\n fname = getattr(fun, \"__name__\", fun)\n msg = (\"Abstract value passed to `{}`, which requires a concrete value. \"\n \"The function to be transformed can't be traced at the required level \"\n \"of abstraction. If using `jit`, try using `static_argnums` or \"\n \"applying `jit` to smaller subfunctions instead.\")\n return msg.format(fname)\n\ndef concretization_function_error(fun):\n def error(self, *args):\n raise TypeError(concretization_err_msg(fun))\n return error\n\n\nclass UnshapedArray(core.AbstractValue):\n __slots__ = ['dtype']\n array_abstraction_level = 3\n\n def __init__(self, dtype):\n self.dtype = dtype\n\n def __eq__(self, other):\n return type(self) is type(other) and self.dtype == other.dtype\n\n def __hash__(self):\n return hash(str(self.dtype))\n\n def __repr__(self):\n return '{}({})'.format(self.__class__.__name__, self.str_short())\n\n _bool = _nonzero = concretization_function_error(bool)\n _float = concretization_function_error(float)\n _int = concretization_function_error(int)\n if six.PY2:\n _long = concretization_function_error(long) # noqa: F821\n _complex = concretization_function_error(complex)\n _hex = concretization_function_error(hex)\n _oct = concretization_function_error(oct)\n\n def at_least_vspace(self):\n return self\n\n def join(self, other):\n return self\n\n def str_short(self):\n return onp.dtype(self.dtype).name\n\n\nclass ShapedArray(UnshapedArray):\n __slots__ = ['shape']\n array_abstraction_level = 2\n\n def __init__(self, shape, dtype):\n self.dtype = onp.dtype(xla_bridge.canonicalize_dtype(dtype))\n self.shape = shape\n\n ndim = property(lambda self: len(self.shape))\n size = property(lambda self: prod(self.shape))\n\n def __eq__(self, other):\n return (type(self) is type(other)\n and self.dtype == other.dtype and self.shape == other.shape)\n\n def __hash__(self):\n return hash((self.shape, str(self.dtype)))\n\n def at_least_vspace(self):\n return self\n\n def join(self, other):\n if self.shape == other.shape and self.dtype == other.dtype:\n return self\n elif self.dtype == other.dtype:\n return UnshapedArray(self.dtype)\n else:\n raise TypeError(other)\n\n def str_short(self):\n dtypestr = onp.dtype(self.dtype).name\n shapestr = ','.join(map(str, self.shape))\n return '{}[{}]'.format(dtypestr, shapestr)\n\n def __len__(self):\n try:\n return self.shape[0]\n except IndexError:\n raise TypeError(\"len() of unsized object\") # same as numpy error\n\n def _len(self, ignored_tracer):\n return len(self)\n\n\nclass ConcreteArray(ShapedArray):\n __slots__ = ['val']\n array_abstraction_level = 0\n\n def __init__(self, val):\n self.val = val\n self.shape = onp.shape(val)\n # canonicalized self.dtype doesn't necessarily match self.val\n self.dtype = onp.dtype(xla_bridge.canonicalize_dtype(onp.result_type(val)))\n assert self.dtype != onp.dtype('O')\n\n def __eq__(self, other):\n return (type(self) is type(other) and self.dtype == other.dtype\n and self.shape == other.shape and onp.all(self.val == other.val))\n\n def __hash__(self):\n return id(self.val)\n\n def at_least_vspace(self):\n return ShapedArray(self.shape, self.dtype)\n\n def join(self, other):\n if self == other:\n return self\n elif self.shape == other.shape and self.dtype == other.dtype:\n return ShapedArray(self.shape, self.dtype)\n elif self.dtype == other.dtype:\n return UnshapedArray(self.dtype)\n else:\n raise TypeError(other)\n\n def str_short(self):\n return str(self.val)\n\n\ndef make_shaped_array(x):\n dtype = xla_bridge.canonicalize_dtype(onp.result_type(x))\n return ShapedArray(onp.shape(x), dtype)\n\ndef zeros_like_array(x):\n dtype = xla_bridge.canonicalize_dtype(onp.result_type(x))\n return onp.broadcast_to(onp.array(0, dtype), onp.shape(x))\n\narray_types = [onp.ndarray, onp.float64, onp.float32, onp.float16,\n onp.complex64, onp.complex128,\n onp.int64, onp.int32, onp.int16, onp.int8,\n onp.bool_, onp.uint64, onp.uint32, onp.uint16, onp.uint8,\n complex, float, int, bool]\n\nfor t in array_types:\n core.pytype_aval_mappings[t] = ConcreteArray\n ad_util.jaxval_zeros_likers[t] = zeros_like_array\n", "path": "jax/abstract_arrays.py"}]}
1,997
290
gh_patches_debug_14455
rasdani/github-patches
git_diff
searx__searx-333
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Piratebay engine : update the URL The current URL is https://thepiratebay.se which doesn't respond. This one seems to work https://thepiratebay.mn (see https://en.wikipedia.org/wiki/The_Pirate_Bay ) Another solution : disable / remove the engine in settings.yml --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `searx/engines/piratebay.py` Content: ``` 1 # Piratebay (Videos, Music, Files) 2 # 3 # @website https://thepiratebay.se 4 # @provide-api no (nothing found) 5 # 6 # @using-api no 7 # @results HTML (using search portal) 8 # @stable yes (HTML can change) 9 # @parse url, title, content, seed, leech, magnetlink 10 11 from urlparse import urljoin 12 from cgi import escape 13 from urllib import quote 14 from lxml import html 15 from operator import itemgetter 16 from searx.engines.xpath import extract_text 17 18 # engine dependent config 19 categories = ['videos', 'music', 'files'] 20 paging = True 21 22 # search-url 23 url = 'https://thepiratebay.se/' 24 search_url = url + 'search/{search_term}/{pageno}/99/{search_type}' 25 26 # piratebay specific type-definitions 27 search_types = {'files': '0', 28 'music': '100', 29 'videos': '200'} 30 31 # specific xpath variables 32 magnet_xpath = './/a[@title="Download this torrent using magnet"]' 33 torrent_xpath = './/a[@title="Download this torrent"]' 34 content_xpath = './/font[@class="detDesc"]' 35 36 37 # do search-request 38 def request(query, params): 39 search_type = search_types.get(params['category'], '0') 40 41 params['url'] = search_url.format(search_term=quote(query), 42 search_type=search_type, 43 pageno=params['pageno'] - 1) 44 45 # FIX: SSLError: hostname 'kthepiratebay.se' 46 # doesn't match either of 'ssl2000.cloudflare.com', 'cloudflare.com', '*.cloudflare.com' 47 params['verify'] = False 48 49 return params 50 51 52 # get response from search-request 53 def response(resp): 54 results = [] 55 56 dom = html.fromstring(resp.text) 57 58 search_res = dom.xpath('//table[@id="searchResult"]//tr') 59 60 # return empty array if nothing is found 61 if not search_res: 62 return [] 63 64 # parse results 65 for result in search_res[1:]: 66 link = result.xpath('.//div[@class="detName"]//a')[0] 67 href = urljoin(url, link.attrib.get('href')) 68 title = extract_text(link) 69 content = escape(extract_text(result.xpath(content_xpath))) 70 seed, leech = result.xpath('.//td[@align="right"]/text()')[:2] 71 72 # convert seed to int if possible 73 if seed.isdigit(): 74 seed = int(seed) 75 else: 76 seed = 0 77 78 # convert leech to int if possible 79 if leech.isdigit(): 80 leech = int(leech) 81 else: 82 leech = 0 83 84 magnetlink = result.xpath(magnet_xpath)[0] 85 torrentfile_links = result.xpath(torrent_xpath) 86 if torrentfile_links: 87 torrentfile_link = torrentfile_links[0].attrib.get('href') 88 else: 89 torrentfile_link = None 90 91 # append result 92 results.append({'url': href, 93 'title': title, 94 'content': content, 95 'seed': seed, 96 'leech': leech, 97 'magnetlink': magnetlink.attrib.get('href'), 98 'torrentfile': torrentfile_link, 99 'template': 'torrent.html'}) 100 101 # return results sorted by seeder 102 return sorted(results, key=itemgetter('seed'), reverse=True) 103 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/searx/engines/piratebay.py b/searx/engines/piratebay.py --- a/searx/engines/piratebay.py +++ b/searx/engines/piratebay.py @@ -20,7 +20,7 @@ paging = True # search-url -url = 'https://thepiratebay.se/' +url = 'https://thepiratebay.am/' search_url = url + 'search/{search_term}/{pageno}/99/{search_type}' # piratebay specific type-definitions @@ -42,10 +42,6 @@ search_type=search_type, pageno=params['pageno'] - 1) - # FIX: SSLError: hostname 'kthepiratebay.se' - # doesn't match either of 'ssl2000.cloudflare.com', 'cloudflare.com', '*.cloudflare.com' - params['verify'] = False - return params
{"golden_diff": "diff --git a/searx/engines/piratebay.py b/searx/engines/piratebay.py\n--- a/searx/engines/piratebay.py\n+++ b/searx/engines/piratebay.py\n@@ -20,7 +20,7 @@\n paging = True\n \n # search-url\n-url = 'https://thepiratebay.se/'\n+url = 'https://thepiratebay.am/'\n search_url = url + 'search/{search_term}/{pageno}/99/{search_type}'\n \n # piratebay specific type-definitions\n@@ -42,10 +42,6 @@\n search_type=search_type,\n pageno=params['pageno'] - 1)\n \n- # FIX: SSLError: hostname 'kthepiratebay.se'\n- # doesn't match either of 'ssl2000.cloudflare.com', 'cloudflare.com', '*.cloudflare.com'\n- params['verify'] = False\n-\n return params\n", "issue": "Piratebay engine : update the URL\nThe current URL is https://thepiratebay.se which doesn't respond.\nThis one seems to work https://thepiratebay.mn (see https://en.wikipedia.org/wiki/The_Pirate_Bay )\n\nAnother solution : disable / remove the engine in settings.yml\n\n", "before_files": [{"content": "# Piratebay (Videos, Music, Files)\n#\n# @website https://thepiratebay.se\n# @provide-api no (nothing found)\n#\n# @using-api no\n# @results HTML (using search portal)\n# @stable yes (HTML can change)\n# @parse url, title, content, seed, leech, magnetlink\n\nfrom urlparse import urljoin\nfrom cgi import escape\nfrom urllib import quote\nfrom lxml import html\nfrom operator import itemgetter\nfrom searx.engines.xpath import extract_text\n\n# engine dependent config\ncategories = ['videos', 'music', 'files']\npaging = True\n\n# search-url\nurl = 'https://thepiratebay.se/'\nsearch_url = url + 'search/{search_term}/{pageno}/99/{search_type}'\n\n# piratebay specific type-definitions\nsearch_types = {'files': '0',\n 'music': '100',\n 'videos': '200'}\n\n# specific xpath variables\nmagnet_xpath = './/a[@title=\"Download this torrent using magnet\"]'\ntorrent_xpath = './/a[@title=\"Download this torrent\"]'\ncontent_xpath = './/font[@class=\"detDesc\"]'\n\n\n# do search-request\ndef request(query, params):\n search_type = search_types.get(params['category'], '0')\n\n params['url'] = search_url.format(search_term=quote(query),\n search_type=search_type,\n pageno=params['pageno'] - 1)\n\n # FIX: SSLError: hostname 'kthepiratebay.se'\n # doesn't match either of 'ssl2000.cloudflare.com', 'cloudflare.com', '*.cloudflare.com'\n params['verify'] = False\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n dom = html.fromstring(resp.text)\n\n search_res = dom.xpath('//table[@id=\"searchResult\"]//tr')\n\n # return empty array if nothing is found\n if not search_res:\n return []\n\n # parse results\n for result in search_res[1:]:\n link = result.xpath('.//div[@class=\"detName\"]//a')[0]\n href = urljoin(url, link.attrib.get('href'))\n title = extract_text(link)\n content = escape(extract_text(result.xpath(content_xpath)))\n seed, leech = result.xpath('.//td[@align=\"right\"]/text()')[:2]\n\n # convert seed to int if possible\n if seed.isdigit():\n seed = int(seed)\n else:\n seed = 0\n\n # convert leech to int if possible\n if leech.isdigit():\n leech = int(leech)\n else:\n leech = 0\n\n magnetlink = result.xpath(magnet_xpath)[0]\n torrentfile_links = result.xpath(torrent_xpath)\n if torrentfile_links:\n torrentfile_link = torrentfile_links[0].attrib.get('href')\n else:\n torrentfile_link = None\n\n # append result\n results.append({'url': href,\n 'title': title,\n 'content': content,\n 'seed': seed,\n 'leech': leech,\n 'magnetlink': magnetlink.attrib.get('href'),\n 'torrentfile': torrentfile_link,\n 'template': 'torrent.html'})\n\n # return results sorted by seeder\n return sorted(results, key=itemgetter('seed'), reverse=True)\n", "path": "searx/engines/piratebay.py"}], "after_files": [{"content": "# Piratebay (Videos, Music, Files)\n#\n# @website https://thepiratebay.se\n# @provide-api no (nothing found)\n#\n# @using-api no\n# @results HTML (using search portal)\n# @stable yes (HTML can change)\n# @parse url, title, content, seed, leech, magnetlink\n\nfrom urlparse import urljoin\nfrom cgi import escape\nfrom urllib import quote\nfrom lxml import html\nfrom operator import itemgetter\nfrom searx.engines.xpath import extract_text\n\n# engine dependent config\ncategories = ['videos', 'music', 'files']\npaging = True\n\n# search-url\nurl = 'https://thepiratebay.am/'\nsearch_url = url + 'search/{search_term}/{pageno}/99/{search_type}'\n\n# piratebay specific type-definitions\nsearch_types = {'files': '0',\n 'music': '100',\n 'videos': '200'}\n\n# specific xpath variables\nmagnet_xpath = './/a[@title=\"Download this torrent using magnet\"]'\ntorrent_xpath = './/a[@title=\"Download this torrent\"]'\ncontent_xpath = './/font[@class=\"detDesc\"]'\n\n\n# do search-request\ndef request(query, params):\n search_type = search_types.get(params['category'], '0')\n\n params['url'] = search_url.format(search_term=quote(query),\n search_type=search_type,\n pageno=params['pageno'] - 1)\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n dom = html.fromstring(resp.text)\n\n search_res = dom.xpath('//table[@id=\"searchResult\"]//tr')\n\n # return empty array if nothing is found\n if not search_res:\n return []\n\n # parse results\n for result in search_res[1:]:\n link = result.xpath('.//div[@class=\"detName\"]//a')[0]\n href = urljoin(url, link.attrib.get('href'))\n title = extract_text(link)\n content = escape(extract_text(result.xpath(content_xpath)))\n seed, leech = result.xpath('.//td[@align=\"right\"]/text()')[:2]\n\n # convert seed to int if possible\n if seed.isdigit():\n seed = int(seed)\n else:\n seed = 0\n\n # convert leech to int if possible\n if leech.isdigit():\n leech = int(leech)\n else:\n leech = 0\n\n magnetlink = result.xpath(magnet_xpath)[0]\n torrentfile_links = result.xpath(torrent_xpath)\n if torrentfile_links:\n torrentfile_link = torrentfile_links[0].attrib.get('href')\n else:\n torrentfile_link = None\n\n # append result\n results.append({'url': href,\n 'title': title,\n 'content': content,\n 'seed': seed,\n 'leech': leech,\n 'magnetlink': magnetlink.attrib.get('href'),\n 'torrentfile': torrentfile_link,\n 'template': 'torrent.html'})\n\n # return results sorted by seeder\n return sorted(results, key=itemgetter('seed'), reverse=True)\n", "path": "searx/engines/piratebay.py"}]}
1,276
215
gh_patches_debug_51099
rasdani/github-patches
git_diff
ivy-llc__ivy-17901
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- fmin --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ivy/functional/frontends/paddle/tensor/math.py` Content: ``` 1 # global 2 import ivy 3 from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes 4 from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back 5 6 7 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 8 @to_ivy_arrays_and_back 9 def sin(x, name=None): 10 return ivy.sin(x) 11 12 13 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 14 @to_ivy_arrays_and_back 15 def cos(x, name=None): 16 return ivy.cos(x) 17 18 19 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 20 @to_ivy_arrays_and_back 21 def acos(x, name=None): 22 return ivy.acos(x) 23 24 25 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 26 @to_ivy_arrays_and_back 27 def cosh(x, name=None): 28 return ivy.cosh(x) 29 30 31 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle") 32 @to_ivy_arrays_and_back 33 def tanh(x, name=None): 34 return ivy.tanh(x) 35 36 37 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 38 @to_ivy_arrays_and_back 39 def acosh(x, name=None): 40 return ivy.acosh(x) 41 42 43 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle") 44 @to_ivy_arrays_and_back 45 def asin(x, name=None): 46 return ivy.asin(x) 47 48 49 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 50 @to_ivy_arrays_and_back 51 def log(x, name=None): 52 return ivy.log(x) 53 54 55 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 56 @to_ivy_arrays_and_back 57 def divide(x, y, name=None): 58 return ivy.divide(x, y) 59 60 61 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 62 @to_ivy_arrays_and_back 63 def abs(x, name=None): 64 return ivy.abs(x) 65 66 67 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 68 @to_ivy_arrays_and_back 69 def multiply(x, y, name=None): 70 return ivy.multiply(x, y) 71 72 73 @with_unsupported_dtypes( 74 {"2.5.0 and below": ("bool", "unsigned", "int8", "float16", "bfloat16")}, "paddle" 75 ) 76 @to_ivy_arrays_and_back 77 def add(x, y, name=None): 78 return ivy.add(x, y) 79 80 81 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 82 @to_ivy_arrays_and_back 83 def subtract(x, y, name=None): 84 return ivy.subtract(x, y) 85 86 87 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle") 88 @to_ivy_arrays_and_back 89 def sqrt(x, name=None): 90 return ivy.sqrt(x) 91 92 93 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 94 @to_ivy_arrays_and_back 95 def atanh(x, name=None): 96 return ivy.atanh(x) 97 98 99 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 100 @to_ivy_arrays_and_back 101 def atan(x, name=None): 102 return ivy.atan(x) 103 104 105 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 106 @to_ivy_arrays_and_back 107 def round(x, name=None): 108 return ivy.round(x) 109 110 111 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 112 @to_ivy_arrays_and_back 113 def ceil(x, name=None): 114 return ivy.ceil(x) 115 116 117 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle") 118 @to_ivy_arrays_and_back 119 def sinh(x, name=None): 120 return ivy.sinh(x) 121 122 123 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 124 @to_ivy_arrays_and_back 125 def pow(x, y, name=None): 126 return ivy.pow(x, y) 127 128 129 @with_unsupported_dtypes({"2.4.2 and below": ("int16", "float16")}, "paddle") 130 @to_ivy_arrays_and_back 131 def conj(x, name=None): 132 return ivy.conj(x) 133 134 135 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 136 @to_ivy_arrays_and_back 137 def floor(x, name=None): 138 return ivy.floor(x) 139 140 141 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 142 @to_ivy_arrays_and_back 143 def remainder(x, y, name=None): 144 return ivy.remainder(x, y) 145 146 147 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 148 @to_ivy_arrays_and_back 149 def log2(x, name=None): 150 return ivy.log2(x) 151 152 153 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 154 @to_ivy_arrays_and_back 155 def log1p(x, name=None): 156 return ivy.log1p(x) 157 158 159 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 160 @to_ivy_arrays_and_back 161 def rad2deg(x, name=None): 162 return ivy.rad2deg(x) 163 164 165 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 166 @to_ivy_arrays_and_back 167 def deg2rad(x, name=None): 168 return ivy.deg2rad(x) 169 170 171 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 172 @to_ivy_arrays_and_back 173 def gcd(x, y, name=None): 174 return ivy.gcd(x, y) 175 176 177 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 178 @to_ivy_arrays_and_back 179 def tan(x, name=None): 180 return ivy.tan(x) 181 182 183 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 184 @to_ivy_arrays_and_back 185 def atan2(x, y, name=None): 186 return ivy.atan2(x, y) 187 188 189 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle") 190 @to_ivy_arrays_and_back 191 def square(x, name=None): 192 return ivy.square(x) 193 194 195 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 196 @to_ivy_arrays_and_back 197 def sign(x, name=None): 198 return ivy.sign(x) 199 200 201 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 202 @to_ivy_arrays_and_back 203 def neg(x, name=None): 204 return ivy.negative(x) 205 206 207 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle") 208 @to_ivy_arrays_and_back 209 def exp(x, name=None): 210 return ivy.exp(x) 211 212 213 @with_supported_dtypes( 214 { 215 "2.4.2 and below": ( 216 "float32", 217 "float64", 218 "int32", 219 "int64", 220 "complex64", 221 "complex128", 222 ) 223 }, 224 "paddle", 225 ) 226 @to_ivy_arrays_and_back 227 def cumprod(x, dim=None, dtype=None, name=None): 228 return ivy.cumprod(x, axis=dim, dtype=dtype) 229 230 231 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 232 @to_ivy_arrays_and_back 233 def reciprocal(x, name=None): 234 return ivy.reciprocal(x) 235 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py --- a/ivy/functional/frontends/paddle/tensor/math.py +++ b/ivy/functional/frontends/paddle/tensor/math.py @@ -232,3 +232,9 @@ @to_ivy_arrays_and_back def reciprocal(x, name=None): return ivy.reciprocal(x) + + +@with_unsupported_dtypes({"2.5.0 and below": "bfloat16"}, "paddle") +@to_ivy_arrays_and_back +def fmin(x, y, name=None): + return ivy.fmin(x, y)
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py\n--- a/ivy/functional/frontends/paddle/tensor/math.py\n+++ b/ivy/functional/frontends/paddle/tensor/math.py\n@@ -232,3 +232,9 @@\n @to_ivy_arrays_and_back\n def reciprocal(x, name=None):\n return ivy.reciprocal(x)\n+\n+\n+@with_unsupported_dtypes({\"2.5.0 and below\": \"bfloat16\"}, \"paddle\")\n+@to_ivy_arrays_and_back\n+def fmin(x, y, name=None):\n+ return ivy.fmin(x, y)\n", "issue": "fmin\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sin(x, name=None):\n return ivy.sin(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acos(x, name=None):\n return ivy.acos(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cosh(x, name=None):\n return ivy.cosh(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acosh(x, name=None):\n return ivy.acosh(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef asin(x, name=None):\n return ivy.asin(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log(x, name=None):\n return ivy.log(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef divide(x, y, name=None):\n return ivy.divide(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef multiply(x, y, name=None):\n return ivy.multiply(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef add(x, y, name=None):\n return ivy.add(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef subtract(x, y, name=None):\n return ivy.subtract(x, y)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sqrt(x, name=None):\n return ivy.sqrt(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atanh(x, name=None):\n return ivy.atanh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atan(x, name=None):\n return ivy.atan(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef round(x, name=None):\n return ivy.round(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef ceil(x, name=None):\n return ivy.ceil(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sinh(x, name=None):\n return ivy.sinh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pow(x, y, name=None):\n return ivy.pow(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"int16\", \"float16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef conj(x, name=None):\n return ivy.conj(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef floor(x, name=None):\n return ivy.floor(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef remainder(x, y, name=None):\n return ivy.remainder(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log2(x, name=None):\n return ivy.log2(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log1p(x, name=None):\n return ivy.log1p(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef rad2deg(x, name=None):\n return ivy.rad2deg(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef deg2rad(x, name=None):\n return ivy.deg2rad(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef gcd(x, y, name=None):\n return ivy.gcd(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tan(x, name=None):\n return ivy.tan(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atan2(x, y, name=None):\n return ivy.atan2(x, y)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef square(x, name=None):\n return ivy.square(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sign(x, name=None):\n return ivy.sign(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef neg(x, name=None):\n return ivy.negative(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef exp(x, name=None):\n return ivy.exp(x)\n\n\n@with_supported_dtypes(\n {\n \"2.4.2 and below\": (\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cumprod(x, dim=None, dtype=None, name=None):\n return ivy.cumprod(x, axis=dim, dtype=dtype)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef reciprocal(x, name=None):\n return ivy.reciprocal(x)\n", "path": "ivy/functional/frontends/paddle/tensor/math.py"}], "after_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sin(x, name=None):\n return ivy.sin(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acos(x, name=None):\n return ivy.acos(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cosh(x, name=None):\n return ivy.cosh(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acosh(x, name=None):\n return ivy.acosh(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef asin(x, name=None):\n return ivy.asin(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log(x, name=None):\n return ivy.log(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef divide(x, y, name=None):\n return ivy.divide(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef multiply(x, y, name=None):\n return ivy.multiply(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef add(x, y, name=None):\n return ivy.add(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef subtract(x, y, name=None):\n return ivy.subtract(x, y)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sqrt(x, name=None):\n return ivy.sqrt(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atanh(x, name=None):\n return ivy.atanh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atan(x, name=None):\n return ivy.atan(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef round(x, name=None):\n return ivy.round(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef ceil(x, name=None):\n return ivy.ceil(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sinh(x, name=None):\n return ivy.sinh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pow(x, y, name=None):\n return ivy.pow(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"int16\", \"float16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef conj(x, name=None):\n return ivy.conj(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef floor(x, name=None):\n return ivy.floor(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef remainder(x, y, name=None):\n return ivy.remainder(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log2(x, name=None):\n return ivy.log2(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log1p(x, name=None):\n return ivy.log1p(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef rad2deg(x, name=None):\n return ivy.rad2deg(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef deg2rad(x, name=None):\n return ivy.deg2rad(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef gcd(x, y, name=None):\n return ivy.gcd(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tan(x, name=None):\n return ivy.tan(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atan2(x, y, name=None):\n return ivy.atan2(x, y)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef square(x, name=None):\n return ivy.square(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sign(x, name=None):\n return ivy.sign(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef neg(x, name=None):\n return ivy.negative(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef exp(x, name=None):\n return ivy.exp(x)\n\n\n@with_supported_dtypes(\n {\n \"2.4.2 and below\": (\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cumprod(x, dim=None, dtype=None, name=None):\n return ivy.cumprod(x, axis=dim, dtype=dtype)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef reciprocal(x, name=None):\n return ivy.reciprocal(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": \"bfloat16\"}, \"paddle\")\n@to_ivy_arrays_and_back\ndef fmin(x, y, name=None):\n return ivy.fmin(x, y)\n", "path": "ivy/functional/frontends/paddle/tensor/math.py"}]}
3,006
157
gh_patches_debug_39378
rasdani/github-patches
git_diff
nltk__nltk-2920
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Consider deprecating Python 3.6 Hello! Python 3.6 is approaching its End Of Life on December 23rd, and we should consider dropping support for Python 3.6 accordingly. In particular, this involves modifying: * CI tests * `python_requires` in `setup.py`. * Lots of documentation throughout the project. Among other places, I'm sure. - Tom Aarsen Add CI tests for Python 3.10 Hello! Python 3.10 is no longer a glimmer far into the future, and we should start supporting it. The first step in doing so is adding 3.10 to the CI tests, and mentioning that NLTK works for Python 3.10 in our documentation throughout the project. - Tom Aarsen --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 # 3 # Setup script for the Natural Language Toolkit 4 # 5 # Copyright (C) 2001-2021 NLTK Project 6 # Author: NLTK Team <[email protected]> 7 # URL: <https://www.nltk.org/> 8 # For license information, see LICENSE.TXT 9 10 # Work around mbcs bug in distutils. 11 # https://bugs.python.org/issue10945 12 import codecs 13 14 try: 15 codecs.lookup("mbcs") 16 except LookupError: 17 ascii = codecs.lookup("ascii") 18 func = lambda name, enc=ascii: {True: enc}.get(name == "mbcs") 19 codecs.register(func) 20 21 import os 22 23 # Use the VERSION file to get NLTK version 24 version_file = os.path.join(os.path.dirname(__file__), "nltk", "VERSION") 25 with open(version_file) as fh: 26 nltk_version = fh.read().strip() 27 28 # setuptools 29 from setuptools import find_packages, setup 30 31 # Specify groups of optional dependencies 32 extras_require = { 33 "machine_learning": [ 34 "numpy", 35 "python-crfsuite", 36 "scikit-learn", 37 "scipy", 38 ], 39 "plot": ["matplotlib"], 40 "tgrep": ["pyparsing"], 41 "twitter": ["twython"], 42 "corenlp": ["requests"], 43 } 44 45 # Add a group made up of all optional dependencies 46 extras_require["all"] = { 47 package for group in extras_require.values() for package in group 48 } 49 50 # Adds CLI commands 51 console_scripts = """ 52 [console_scripts] 53 nltk=nltk.cli:cli 54 """ 55 56 _project_homepage = "https://www.nltk.org/" 57 58 setup( 59 name="nltk", 60 description="Natural Language Toolkit", 61 version=nltk_version, 62 url=_project_homepage, 63 project_urls={ 64 "Documentation": _project_homepage, 65 "Source Code": "https://github.com/nltk/nltk", 66 "Issue Tracker": "https://github.com/nltk/nltk/issues", 67 }, 68 long_description="""\ 69 The Natural Language Toolkit (NLTK) is a Python package for 70 natural language processing. NLTK requires Python 3.6, 3.7, 3.8, or 3.9.""", 71 license="Apache License, Version 2.0", 72 keywords=[ 73 "NLP", 74 "CL", 75 "natural language processing", 76 "computational linguistics", 77 "parsing", 78 "tagging", 79 "tokenizing", 80 "syntax", 81 "linguistics", 82 "language", 83 "natural language", 84 "text analytics", 85 ], 86 maintainer="NLTK Team", 87 maintainer_email="[email protected]", 88 author="NLTK Team", 89 author_email="[email protected]", 90 classifiers=[ 91 "Development Status :: 5 - Production/Stable", 92 "Intended Audience :: Developers", 93 "Intended Audience :: Education", 94 "Intended Audience :: Information Technology", 95 "Intended Audience :: Science/Research", 96 "License :: OSI Approved :: Apache Software License", 97 "Operating System :: OS Independent", 98 "Programming Language :: Python :: 3.6", 99 "Programming Language :: Python :: 3.7", 100 "Programming Language :: Python :: 3.8", 101 "Programming Language :: Python :: 3.9", 102 "Topic :: Scientific/Engineering", 103 "Topic :: Scientific/Engineering :: Artificial Intelligence", 104 "Topic :: Scientific/Engineering :: Human Machine Interfaces", 105 "Topic :: Scientific/Engineering :: Information Analysis", 106 "Topic :: Text Processing", 107 "Topic :: Text Processing :: Filters", 108 "Topic :: Text Processing :: General", 109 "Topic :: Text Processing :: Indexing", 110 "Topic :: Text Processing :: Linguistic", 111 ], 112 package_data={"nltk": ["test/*.doctest", "VERSION"]}, 113 python_requires=">=3.6", 114 install_requires=[ 115 "click", 116 "joblib", 117 "regex>=2021.8.3", 118 "tqdm", 119 ], 120 extras_require=extras_require, 121 packages=find_packages(), 122 zip_safe=False, # since normal files will be present too? 123 entry_points=console_scripts, 124 ) 125 ``` Path: `nltk/__init__.py` Content: ``` 1 # Natural Language Toolkit (NLTK) 2 # 3 # Copyright (C) 2001-2021 NLTK Project 4 # Authors: Steven Bird <[email protected]> 5 # Edward Loper <[email protected]> 6 # URL: <https://www.nltk.org/> 7 # For license information, see LICENSE.TXT 8 9 """ 10 The Natural Language Toolkit (NLTK) is an open source Python library 11 for Natural Language Processing. A free online book is available. 12 (If you use the library for academic research, please cite the book.) 13 14 Steven Bird, Ewan Klein, and Edward Loper (2009). 15 Natural Language Processing with Python. O'Reilly Media Inc. 16 https://www.nltk.org/book/ 17 18 isort:skip_file 19 """ 20 21 import os 22 23 # ////////////////////////////////////////////////////// 24 # Metadata 25 # ////////////////////////////////////////////////////// 26 27 # Version. For each new release, the version number should be updated 28 # in the file VERSION. 29 try: 30 # If a VERSION file exists, use it! 31 version_file = os.path.join(os.path.dirname(__file__), "VERSION") 32 with open(version_file) as infile: 33 __version__ = infile.read().strip() 34 except NameError: 35 __version__ = "unknown (running code interactively?)" 36 except OSError as ex: 37 __version__ = "unknown (%s)" % ex 38 39 if __doc__ is not None: # fix for the ``python -OO`` 40 __doc__ += "\n@version: " + __version__ 41 42 43 # Copyright notice 44 __copyright__ = """\ 45 Copyright (C) 2001-2021 NLTK Project. 46 47 Distributed and Licensed under the Apache License, Version 2.0, 48 which is included by reference. 49 """ 50 51 __license__ = "Apache License, Version 2.0" 52 # Description of the toolkit, keywords, and the project's primary URL. 53 __longdescr__ = """\ 54 The Natural Language Toolkit (NLTK) is a Python package for 55 natural language processing. NLTK requires Python 3.6, 3.7, 3.8, or 3.9.""" 56 __keywords__ = [ 57 "NLP", 58 "CL", 59 "natural language processing", 60 "computational linguistics", 61 "parsing", 62 "tagging", 63 "tokenizing", 64 "syntax", 65 "linguistics", 66 "language", 67 "natural language", 68 "text analytics", 69 ] 70 __url__ = "https://www.nltk.org/" 71 72 # Maintainer, contributors, etc. 73 __maintainer__ = "NLTK Team" 74 __maintainer_email__ = "[email protected]" 75 __author__ = __maintainer__ 76 __author_email__ = __maintainer_email__ 77 78 # "Trove" classifiers for Python Package Index. 79 __classifiers__ = [ 80 "Development Status :: 5 - Production/Stable", 81 "Intended Audience :: Developers", 82 "Intended Audience :: Education", 83 "Intended Audience :: Information Technology", 84 "Intended Audience :: Science/Research", 85 "License :: OSI Approved :: Apache Software License", 86 "Operating System :: OS Independent", 87 "Programming Language :: Python :: 3.6", 88 "Programming Language :: Python :: 3.7", 89 "Programming Language :: Python :: 3.8", 90 "Programming Language :: Python :: 3.9", 91 "Topic :: Scientific/Engineering", 92 "Topic :: Scientific/Engineering :: Artificial Intelligence", 93 "Topic :: Scientific/Engineering :: Human Machine Interfaces", 94 "Topic :: Scientific/Engineering :: Information Analysis", 95 "Topic :: Text Processing", 96 "Topic :: Text Processing :: Filters", 97 "Topic :: Text Processing :: General", 98 "Topic :: Text Processing :: Indexing", 99 "Topic :: Text Processing :: Linguistic", 100 ] 101 102 from nltk.internals import config_java 103 104 # support numpy from pypy 105 try: 106 import numpypy 107 except ImportError: 108 pass 109 110 # Override missing methods on environments where it cannot be used like GAE. 111 import subprocess 112 113 if not hasattr(subprocess, "PIPE"): 114 115 def _fake_PIPE(*args, **kwargs): 116 raise NotImplementedError("subprocess.PIPE is not supported.") 117 118 subprocess.PIPE = _fake_PIPE 119 if not hasattr(subprocess, "Popen"): 120 121 def _fake_Popen(*args, **kwargs): 122 raise NotImplementedError("subprocess.Popen is not supported.") 123 124 subprocess.Popen = _fake_Popen 125 126 ########################################################### 127 # TOP-LEVEL MODULES 128 ########################################################### 129 130 # Import top-level functionality into top-level namespace 131 132 from nltk.collocations import * 133 from nltk.decorators import decorator, memoize 134 from nltk.featstruct import * 135 from nltk.grammar import * 136 from nltk.probability import * 137 from nltk.text import * 138 from nltk.util import * 139 from nltk.jsontags import * 140 141 ########################################################### 142 # PACKAGES 143 ########################################################### 144 145 from nltk.chunk import * 146 from nltk.classify import * 147 from nltk.inference import * 148 from nltk.metrics import * 149 from nltk.parse import * 150 from nltk.tag import * 151 from nltk.tokenize import * 152 from nltk.translate import * 153 from nltk.tree import * 154 from nltk.sem import * 155 from nltk.stem import * 156 157 # Packages which can be lazily imported 158 # (a) we don't import * 159 # (b) they're slow to import or have run-time dependencies 160 # that can safely fail at run time 161 162 from nltk import lazyimport 163 164 app = lazyimport.LazyModule("nltk.app", locals(), globals()) 165 chat = lazyimport.LazyModule("nltk.chat", locals(), globals()) 166 corpus = lazyimport.LazyModule("nltk.corpus", locals(), globals()) 167 draw = lazyimport.LazyModule("nltk.draw", locals(), globals()) 168 toolbox = lazyimport.LazyModule("nltk.toolbox", locals(), globals()) 169 170 # Optional loading 171 172 try: 173 import numpy 174 except ImportError: 175 pass 176 else: 177 from nltk import cluster 178 179 from nltk.downloader import download, download_shell 180 181 try: 182 import tkinter 183 except ImportError: 184 pass 185 else: 186 try: 187 from nltk.downloader import download_gui 188 except RuntimeError as e: 189 import warnings 190 191 warnings.warn( 192 "Corpus downloader GUI not loaded " 193 "(RuntimeError during import: %s)" % str(e) 194 ) 195 196 # explicitly import all top-level modules (ensuring 197 # they override the same names inadvertently imported 198 # from a subpackage) 199 200 from nltk import ccg, chunk, classify, collocations 201 from nltk import data, featstruct, grammar, help, inference, metrics 202 from nltk import misc, parse, probability, sem, stem, wsd 203 from nltk import tag, tbl, text, tokenize, translate, tree, util 204 205 206 # FIXME: override any accidentally imported demo, see https://github.com/nltk/nltk/issues/2116 207 def demo(): 208 print("To run the demo code for a module, type nltk.module.demo()") 209 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nltk/__init__.py b/nltk/__init__.py --- a/nltk/__init__.py +++ b/nltk/__init__.py @@ -52,7 +52,7 @@ # Description of the toolkit, keywords, and the project's primary URL. __longdescr__ = """\ The Natural Language Toolkit (NLTK) is a Python package for -natural language processing. NLTK requires Python 3.6, 3.7, 3.8, or 3.9.""" +natural language processing. NLTK requires Python 3.7, 3.8, 3.9 or 3.10.""" __keywords__ = [ "NLP", "CL", @@ -84,10 +84,10 @@ "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", - "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", "Topic :: Scientific/Engineering", "Topic :: Scientific/Engineering :: Artificial Intelligence", "Topic :: Scientific/Engineering :: Human Machine Interfaces", diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -67,7 +67,7 @@ }, long_description="""\ The Natural Language Toolkit (NLTK) is a Python package for -natural language processing. NLTK requires Python 3.6, 3.7, 3.8, or 3.9.""", +natural language processing. NLTK requires Python 3.7, 3.8, 3.9 or 3.10.""", license="Apache License, Version 2.0", keywords=[ "NLP", @@ -95,10 +95,10 @@ "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", - "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", "Topic :: Scientific/Engineering", "Topic :: Scientific/Engineering :: Artificial Intelligence", "Topic :: Scientific/Engineering :: Human Machine Interfaces", @@ -110,7 +110,7 @@ "Topic :: Text Processing :: Linguistic", ], package_data={"nltk": ["test/*.doctest", "VERSION"]}, - python_requires=">=3.6", + python_requires=">=3.7", install_requires=[ "click", "joblib",
{"golden_diff": "diff --git a/nltk/__init__.py b/nltk/__init__.py\n--- a/nltk/__init__.py\n+++ b/nltk/__init__.py\n@@ -52,7 +52,7 @@\n # Description of the toolkit, keywords, and the project's primary URL.\n __longdescr__ = \"\"\"\\\n The Natural Language Toolkit (NLTK) is a Python package for\n-natural language processing. NLTK requires Python 3.6, 3.7, 3.8, or 3.9.\"\"\"\n+natural language processing. NLTK requires Python 3.7, 3.8, 3.9 or 3.10.\"\"\"\n __keywords__ = [\n \"NLP\",\n \"CL\",\n@@ -84,10 +84,10 @@\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n- \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n+ \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Human Machine Interfaces\",\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -67,7 +67,7 @@\n },\n long_description=\"\"\"\\\n The Natural Language Toolkit (NLTK) is a Python package for\n-natural language processing. NLTK requires Python 3.6, 3.7, 3.8, or 3.9.\"\"\",\n+natural language processing. NLTK requires Python 3.7, 3.8, 3.9 or 3.10.\"\"\",\n license=\"Apache License, Version 2.0\",\n keywords=[\n \"NLP\",\n@@ -95,10 +95,10 @@\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n- \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n+ \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Human Machine Interfaces\",\n@@ -110,7 +110,7 @@\n \"Topic :: Text Processing :: Linguistic\",\n ],\n package_data={\"nltk\": [\"test/*.doctest\", \"VERSION\"]},\n- python_requires=\">=3.6\",\n+ python_requires=\">=3.7\",\n install_requires=[\n \"click\",\n \"joblib\",\n", "issue": "Consider deprecating Python 3.6\nHello!\r\n\r\nPython 3.6 is approaching its End Of Life on December 23rd, and we should consider dropping support for Python 3.6 accordingly. In particular, this involves modifying:\r\n* CI tests\r\n* `python_requires` in `setup.py`.\r\n* Lots of documentation throughout the project.\r\n\r\nAmong other places, I'm sure.\r\n\r\n- Tom Aarsen\nAdd CI tests for Python 3.10\nHello!\r\n\r\nPython 3.10 is no longer a glimmer far into the future, and we should start supporting it. The first step in doing so is adding 3.10 to the CI tests, and mentioning that NLTK works for Python 3.10 in our documentation throughout the project.\r\n\r\n- Tom Aarsen\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# Setup script for the Natural Language Toolkit\n#\n# Copyright (C) 2001-2021 NLTK Project\n# Author: NLTK Team <[email protected]>\n# URL: <https://www.nltk.org/>\n# For license information, see LICENSE.TXT\n\n# Work around mbcs bug in distutils.\n# https://bugs.python.org/issue10945\nimport codecs\n\ntry:\n codecs.lookup(\"mbcs\")\nexcept LookupError:\n ascii = codecs.lookup(\"ascii\")\n func = lambda name, enc=ascii: {True: enc}.get(name == \"mbcs\")\n codecs.register(func)\n\nimport os\n\n# Use the VERSION file to get NLTK version\nversion_file = os.path.join(os.path.dirname(__file__), \"nltk\", \"VERSION\")\nwith open(version_file) as fh:\n nltk_version = fh.read().strip()\n\n# setuptools\nfrom setuptools import find_packages, setup\n\n# Specify groups of optional dependencies\nextras_require = {\n \"machine_learning\": [\n \"numpy\",\n \"python-crfsuite\",\n \"scikit-learn\",\n \"scipy\",\n ],\n \"plot\": [\"matplotlib\"],\n \"tgrep\": [\"pyparsing\"],\n \"twitter\": [\"twython\"],\n \"corenlp\": [\"requests\"],\n}\n\n# Add a group made up of all optional dependencies\nextras_require[\"all\"] = {\n package for group in extras_require.values() for package in group\n}\n\n# Adds CLI commands\nconsole_scripts = \"\"\"\n[console_scripts]\nnltk=nltk.cli:cli\n\"\"\"\n\n_project_homepage = \"https://www.nltk.org/\"\n\nsetup(\n name=\"nltk\",\n description=\"Natural Language Toolkit\",\n version=nltk_version,\n url=_project_homepage,\n project_urls={\n \"Documentation\": _project_homepage,\n \"Source Code\": \"https://github.com/nltk/nltk\",\n \"Issue Tracker\": \"https://github.com/nltk/nltk/issues\",\n },\n long_description=\"\"\"\\\nThe Natural Language Toolkit (NLTK) is a Python package for\nnatural language processing. NLTK requires Python 3.6, 3.7, 3.8, or 3.9.\"\"\",\n license=\"Apache License, Version 2.0\",\n keywords=[\n \"NLP\",\n \"CL\",\n \"natural language processing\",\n \"computational linguistics\",\n \"parsing\",\n \"tagging\",\n \"tokenizing\",\n \"syntax\",\n \"linguistics\",\n \"language\",\n \"natural language\",\n \"text analytics\",\n ],\n maintainer=\"NLTK Team\",\n maintainer_email=\"[email protected]\",\n author=\"NLTK Team\",\n author_email=\"[email protected]\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Human Machine Interfaces\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Text Processing\",\n \"Topic :: Text Processing :: Filters\",\n \"Topic :: Text Processing :: General\",\n \"Topic :: Text Processing :: Indexing\",\n \"Topic :: Text Processing :: Linguistic\",\n ],\n package_data={\"nltk\": [\"test/*.doctest\", \"VERSION\"]},\n python_requires=\">=3.6\",\n install_requires=[\n \"click\",\n \"joblib\",\n \"regex>=2021.8.3\",\n \"tqdm\",\n ],\n extras_require=extras_require,\n packages=find_packages(),\n zip_safe=False, # since normal files will be present too?\n entry_points=console_scripts,\n)\n", "path": "setup.py"}, {"content": "# Natural Language Toolkit (NLTK)\n#\n# Copyright (C) 2001-2021 NLTK Project\n# Authors: Steven Bird <[email protected]>\n# Edward Loper <[email protected]>\n# URL: <https://www.nltk.org/>\n# For license information, see LICENSE.TXT\n\n\"\"\"\nThe Natural Language Toolkit (NLTK) is an open source Python library\nfor Natural Language Processing. A free online book is available.\n(If you use the library for academic research, please cite the book.)\n\nSteven Bird, Ewan Klein, and Edward Loper (2009).\nNatural Language Processing with Python. O'Reilly Media Inc.\nhttps://www.nltk.org/book/\n\nisort:skip_file\n\"\"\"\n\nimport os\n\n# //////////////////////////////////////////////////////\n# Metadata\n# //////////////////////////////////////////////////////\n\n# Version. For each new release, the version number should be updated\n# in the file VERSION.\ntry:\n # If a VERSION file exists, use it!\n version_file = os.path.join(os.path.dirname(__file__), \"VERSION\")\n with open(version_file) as infile:\n __version__ = infile.read().strip()\nexcept NameError:\n __version__ = \"unknown (running code interactively?)\"\nexcept OSError as ex:\n __version__ = \"unknown (%s)\" % ex\n\nif __doc__ is not None: # fix for the ``python -OO``\n __doc__ += \"\\n@version: \" + __version__\n\n\n# Copyright notice\n__copyright__ = \"\"\"\\\nCopyright (C) 2001-2021 NLTK Project.\n\nDistributed and Licensed under the Apache License, Version 2.0,\nwhich is included by reference.\n\"\"\"\n\n__license__ = \"Apache License, Version 2.0\"\n# Description of the toolkit, keywords, and the project's primary URL.\n__longdescr__ = \"\"\"\\\nThe Natural Language Toolkit (NLTK) is a Python package for\nnatural language processing. NLTK requires Python 3.6, 3.7, 3.8, or 3.9.\"\"\"\n__keywords__ = [\n \"NLP\",\n \"CL\",\n \"natural language processing\",\n \"computational linguistics\",\n \"parsing\",\n \"tagging\",\n \"tokenizing\",\n \"syntax\",\n \"linguistics\",\n \"language\",\n \"natural language\",\n \"text analytics\",\n]\n__url__ = \"https://www.nltk.org/\"\n\n# Maintainer, contributors, etc.\n__maintainer__ = \"NLTK Team\"\n__maintainer_email__ = \"[email protected]\"\n__author__ = __maintainer__\n__author_email__ = __maintainer_email__\n\n# \"Trove\" classifiers for Python Package Index.\n__classifiers__ = [\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Human Machine Interfaces\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Text Processing\",\n \"Topic :: Text Processing :: Filters\",\n \"Topic :: Text Processing :: General\",\n \"Topic :: Text Processing :: Indexing\",\n \"Topic :: Text Processing :: Linguistic\",\n]\n\nfrom nltk.internals import config_java\n\n# support numpy from pypy\ntry:\n import numpypy\nexcept ImportError:\n pass\n\n# Override missing methods on environments where it cannot be used like GAE.\nimport subprocess\n\nif not hasattr(subprocess, \"PIPE\"):\n\n def _fake_PIPE(*args, **kwargs):\n raise NotImplementedError(\"subprocess.PIPE is not supported.\")\n\n subprocess.PIPE = _fake_PIPE\nif not hasattr(subprocess, \"Popen\"):\n\n def _fake_Popen(*args, **kwargs):\n raise NotImplementedError(\"subprocess.Popen is not supported.\")\n\n subprocess.Popen = _fake_Popen\n\n###########################################################\n# TOP-LEVEL MODULES\n###########################################################\n\n# Import top-level functionality into top-level namespace\n\nfrom nltk.collocations import *\nfrom nltk.decorators import decorator, memoize\nfrom nltk.featstruct import *\nfrom nltk.grammar import *\nfrom nltk.probability import *\nfrom nltk.text import *\nfrom nltk.util import *\nfrom nltk.jsontags import *\n\n###########################################################\n# PACKAGES\n###########################################################\n\nfrom nltk.chunk import *\nfrom nltk.classify import *\nfrom nltk.inference import *\nfrom nltk.metrics import *\nfrom nltk.parse import *\nfrom nltk.tag import *\nfrom nltk.tokenize import *\nfrom nltk.translate import *\nfrom nltk.tree import *\nfrom nltk.sem import *\nfrom nltk.stem import *\n\n# Packages which can be lazily imported\n# (a) we don't import *\n# (b) they're slow to import or have run-time dependencies\n# that can safely fail at run time\n\nfrom nltk import lazyimport\n\napp = lazyimport.LazyModule(\"nltk.app\", locals(), globals())\nchat = lazyimport.LazyModule(\"nltk.chat\", locals(), globals())\ncorpus = lazyimport.LazyModule(\"nltk.corpus\", locals(), globals())\ndraw = lazyimport.LazyModule(\"nltk.draw\", locals(), globals())\ntoolbox = lazyimport.LazyModule(\"nltk.toolbox\", locals(), globals())\n\n# Optional loading\n\ntry:\n import numpy\nexcept ImportError:\n pass\nelse:\n from nltk import cluster\n\nfrom nltk.downloader import download, download_shell\n\ntry:\n import tkinter\nexcept ImportError:\n pass\nelse:\n try:\n from nltk.downloader import download_gui\n except RuntimeError as e:\n import warnings\n\n warnings.warn(\n \"Corpus downloader GUI not loaded \"\n \"(RuntimeError during import: %s)\" % str(e)\n )\n\n# explicitly import all top-level modules (ensuring\n# they override the same names inadvertently imported\n# from a subpackage)\n\nfrom nltk import ccg, chunk, classify, collocations\nfrom nltk import data, featstruct, grammar, help, inference, metrics\nfrom nltk import misc, parse, probability, sem, stem, wsd\nfrom nltk import tag, tbl, text, tokenize, translate, tree, util\n\n\n# FIXME: override any accidentally imported demo, see https://github.com/nltk/nltk/issues/2116\ndef demo():\n print(\"To run the demo code for a module, type nltk.module.demo()\")\n", "path": "nltk/__init__.py"}], "after_files": [{"content": "#!/usr/bin/env python\n#\n# Setup script for the Natural Language Toolkit\n#\n# Copyright (C) 2001-2021 NLTK Project\n# Author: NLTK Team <[email protected]>\n# URL: <https://www.nltk.org/>\n# For license information, see LICENSE.TXT\n\n# Work around mbcs bug in distutils.\n# https://bugs.python.org/issue10945\nimport codecs\n\ntry:\n codecs.lookup(\"mbcs\")\nexcept LookupError:\n ascii = codecs.lookup(\"ascii\")\n func = lambda name, enc=ascii: {True: enc}.get(name == \"mbcs\")\n codecs.register(func)\n\nimport os\n\n# Use the VERSION file to get NLTK version\nversion_file = os.path.join(os.path.dirname(__file__), \"nltk\", \"VERSION\")\nwith open(version_file) as fh:\n nltk_version = fh.read().strip()\n\n# setuptools\nfrom setuptools import find_packages, setup\n\n# Specify groups of optional dependencies\nextras_require = {\n \"machine_learning\": [\n \"numpy\",\n \"python-crfsuite\",\n \"scikit-learn\",\n \"scipy\",\n ],\n \"plot\": [\"matplotlib\"],\n \"tgrep\": [\"pyparsing\"],\n \"twitter\": [\"twython\"],\n \"corenlp\": [\"requests\"],\n}\n\n# Add a group made up of all optional dependencies\nextras_require[\"all\"] = {\n package for group in extras_require.values() for package in group\n}\n\n# Adds CLI commands\nconsole_scripts = \"\"\"\n[console_scripts]\nnltk=nltk.cli:cli\n\"\"\"\n\n_project_homepage = \"https://www.nltk.org/\"\n\nsetup(\n name=\"nltk\",\n description=\"Natural Language Toolkit\",\n version=nltk_version,\n url=_project_homepage,\n project_urls={\n \"Documentation\": _project_homepage,\n \"Source Code\": \"https://github.com/nltk/nltk\",\n \"Issue Tracker\": \"https://github.com/nltk/nltk/issues\",\n },\n long_description=\"\"\"\\\nThe Natural Language Toolkit (NLTK) is a Python package for\nnatural language processing. NLTK requires Python 3.7, 3.8, 3.9 or 3.10.\"\"\",\n license=\"Apache License, Version 2.0\",\n keywords=[\n \"NLP\",\n \"CL\",\n \"natural language processing\",\n \"computational linguistics\",\n \"parsing\",\n \"tagging\",\n \"tokenizing\",\n \"syntax\",\n \"linguistics\",\n \"language\",\n \"natural language\",\n \"text analytics\",\n ],\n maintainer=\"NLTK Team\",\n maintainer_email=\"[email protected]\",\n author=\"NLTK Team\",\n author_email=\"[email protected]\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Human Machine Interfaces\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Text Processing\",\n \"Topic :: Text Processing :: Filters\",\n \"Topic :: Text Processing :: General\",\n \"Topic :: Text Processing :: Indexing\",\n \"Topic :: Text Processing :: Linguistic\",\n ],\n package_data={\"nltk\": [\"test/*.doctest\", \"VERSION\"]},\n python_requires=\">=3.7\",\n install_requires=[\n \"click\",\n \"joblib\",\n \"regex>=2021.8.3\",\n \"tqdm\",\n ],\n extras_require=extras_require,\n packages=find_packages(),\n zip_safe=False, # since normal files will be present too?\n entry_points=console_scripts,\n)\n", "path": "setup.py"}, {"content": "# Natural Language Toolkit (NLTK)\n#\n# Copyright (C) 2001-2021 NLTK Project\n# Authors: Steven Bird <[email protected]>\n# Edward Loper <[email protected]>\n# URL: <https://www.nltk.org/>\n# For license information, see LICENSE.TXT\n\n\"\"\"\nThe Natural Language Toolkit (NLTK) is an open source Python library\nfor Natural Language Processing. A free online book is available.\n(If you use the library for academic research, please cite the book.)\n\nSteven Bird, Ewan Klein, and Edward Loper (2009).\nNatural Language Processing with Python. O'Reilly Media Inc.\nhttps://www.nltk.org/book/\n\nisort:skip_file\n\"\"\"\n\nimport os\n\n# //////////////////////////////////////////////////////\n# Metadata\n# //////////////////////////////////////////////////////\n\n# Version. For each new release, the version number should be updated\n# in the file VERSION.\ntry:\n # If a VERSION file exists, use it!\n version_file = os.path.join(os.path.dirname(__file__), \"VERSION\")\n with open(version_file) as infile:\n __version__ = infile.read().strip()\nexcept NameError:\n __version__ = \"unknown (running code interactively?)\"\nexcept OSError as ex:\n __version__ = \"unknown (%s)\" % ex\n\nif __doc__ is not None: # fix for the ``python -OO``\n __doc__ += \"\\n@version: \" + __version__\n\n\n# Copyright notice\n__copyright__ = \"\"\"\\\nCopyright (C) 2001-2021 NLTK Project.\n\nDistributed and Licensed under the Apache License, Version 2.0,\nwhich is included by reference.\n\"\"\"\n\n__license__ = \"Apache License, Version 2.0\"\n# Description of the toolkit, keywords, and the project's primary URL.\n__longdescr__ = \"\"\"\\\nThe Natural Language Toolkit (NLTK) is a Python package for\nnatural language processing. NLTK requires Python 3.7, 3.8, 3.9 or 3.10.\"\"\"\n__keywords__ = [\n \"NLP\",\n \"CL\",\n \"natural language processing\",\n \"computational linguistics\",\n \"parsing\",\n \"tagging\",\n \"tokenizing\",\n \"syntax\",\n \"linguistics\",\n \"language\",\n \"natural language\",\n \"text analytics\",\n]\n__url__ = \"https://www.nltk.org/\"\n\n# Maintainer, contributors, etc.\n__maintainer__ = \"NLTK Team\"\n__maintainer_email__ = \"[email protected]\"\n__author__ = __maintainer__\n__author_email__ = __maintainer_email__\n\n# \"Trove\" classifiers for Python Package Index.\n__classifiers__ = [\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Human Machine Interfaces\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Text Processing\",\n \"Topic :: Text Processing :: Filters\",\n \"Topic :: Text Processing :: General\",\n \"Topic :: Text Processing :: Indexing\",\n \"Topic :: Text Processing :: Linguistic\",\n]\n\nfrom nltk.internals import config_java\n\n# support numpy from pypy\ntry:\n import numpypy\nexcept ImportError:\n pass\n\n# Override missing methods on environments where it cannot be used like GAE.\nimport subprocess\n\nif not hasattr(subprocess, \"PIPE\"):\n\n def _fake_PIPE(*args, **kwargs):\n raise NotImplementedError(\"subprocess.PIPE is not supported.\")\n\n subprocess.PIPE = _fake_PIPE\nif not hasattr(subprocess, \"Popen\"):\n\n def _fake_Popen(*args, **kwargs):\n raise NotImplementedError(\"subprocess.Popen is not supported.\")\n\n subprocess.Popen = _fake_Popen\n\n###########################################################\n# TOP-LEVEL MODULES\n###########################################################\n\n# Import top-level functionality into top-level namespace\n\nfrom nltk.collocations import *\nfrom nltk.decorators import decorator, memoize\nfrom nltk.featstruct import *\nfrom nltk.grammar import *\nfrom nltk.probability import *\nfrom nltk.text import *\nfrom nltk.util import *\nfrom nltk.jsontags import *\n\n###########################################################\n# PACKAGES\n###########################################################\n\nfrom nltk.chunk import *\nfrom nltk.classify import *\nfrom nltk.inference import *\nfrom nltk.metrics import *\nfrom nltk.parse import *\nfrom nltk.tag import *\nfrom nltk.tokenize import *\nfrom nltk.translate import *\nfrom nltk.tree import *\nfrom nltk.sem import *\nfrom nltk.stem import *\n\n# Packages which can be lazily imported\n# (a) we don't import *\n# (b) they're slow to import or have run-time dependencies\n# that can safely fail at run time\n\nfrom nltk import lazyimport\n\napp = lazyimport.LazyModule(\"nltk.app\", locals(), globals())\nchat = lazyimport.LazyModule(\"nltk.chat\", locals(), globals())\ncorpus = lazyimport.LazyModule(\"nltk.corpus\", locals(), globals())\ndraw = lazyimport.LazyModule(\"nltk.draw\", locals(), globals())\ntoolbox = lazyimport.LazyModule(\"nltk.toolbox\", locals(), globals())\n\n# Optional loading\n\ntry:\n import numpy\nexcept ImportError:\n pass\nelse:\n from nltk import cluster\n\nfrom nltk.downloader import download, download_shell\n\ntry:\n import tkinter\nexcept ImportError:\n pass\nelse:\n try:\n from nltk.downloader import download_gui\n except RuntimeError as e:\n import warnings\n\n warnings.warn(\n \"Corpus downloader GUI not loaded \"\n \"(RuntimeError during import: %s)\" % str(e)\n )\n\n# explicitly import all top-level modules (ensuring\n# they override the same names inadvertently imported\n# from a subpackage)\n\nfrom nltk import ccg, chunk, classify, collocations\nfrom nltk import data, featstruct, grammar, help, inference, metrics\nfrom nltk import misc, parse, probability, sem, stem, wsd\nfrom nltk import tag, tbl, text, tokenize, translate, tree, util\n\n\n# FIXME: override any accidentally imported demo, see https://github.com/nltk/nltk/issues/2116\ndef demo():\n print(\"To run the demo code for a module, type nltk.module.demo()\")\n", "path": "nltk/__init__.py"}]}
3,590
647
gh_patches_debug_28219
rasdani/github-patches
git_diff
saulpw__visidata-806
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- frequency histogram for number is wrong **Small description** With [this data](https://github.com/saulpw/visidata/files/5543325/a.txt), when type casting year column to number `#`, and asking for frequency `F`, bins are wrong. I works correctly when not type casting. **Expected result** (obtained without type casting and moving 2020 to the top) ``` year count percent histogram 2020 11 7.69 ********************************************* 2019 12 8.39 ************************************************** 2018 12 8.39 ************************************************** 2017 12 8.39 ************************************************** 2016 12 8.39 ************************************************** 2015 12 8.39 ************************************************** 2014 12 8.39 ************************************************** 2013 12 8.39 ************************************************** 2012 12 8.39 ************************************************** 2011 12 8.39 ************************************************** 2010 12 8.39 ************************************************** 2009 12 8.39 ************************************************** ``` **Actual result with screenshot** (typecasting to number) ``` year count percent histogram 2009 12 8.39 ************************** 2010 12 8.39 ************************** 2011 12 8.39 ************************** 2012 12 8.39 ************************** 2013 12 8.39 ************************** 2014 12 8.39 ************************** 2015 12 8.39 ************************** 2016 12 8.39 ************************** 2017 12 8.39 ************************** 2018 12 8.39 ************************** 2019 23 16.08 ************************************************** 2020 0 0.00 ``` **Steps to reproduce with sample data and a .vd** * [data](https://github.com/saulpw/visidata/files/5543325/a.txt) * [commands](https://github.com/saulpw/visidata/files/5543339/a.vd.txt) **Additional context** version `saul.pw/VisiData v2.0.1` it seems there is a limit to 11 bins and the rest is empty. The more I put data, the more is aggregated in the 11th bin. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `visidata/pivot.py` Content: ``` 1 import collections 2 from visidata import * 3 4 5 # discrete_keys = tuple of formatted discrete keys that group the row 6 # numeric_key is a range 7 # sourcerows is list(all source.rows in group) 8 # pivotrows is { pivot_values: list(source.rows in group with pivot_values) } 9 PivotGroupRow = collections.namedtuple('PivotGroupRow', 'discrete_keys numeric_key sourcerows pivotrows'.split()) 10 11 def Pivot(source, groupByCols, pivotCols): 12 return PivotSheet('', 13 groupByCols, 14 pivotCols, 15 source=source) 16 17 def makeErrorKey(col): 18 if col.type is date: 19 return date.min # date('2000-01-01') 20 else: 21 return col.type() 22 23 def formatRange(col, numeric_key): 24 a, b = numeric_key 25 nankey = makeErrorKey(col) 26 if a is nankey and b is nankey: 27 return '#ERR' 28 elif a == b: 29 return col.format(a) 30 return ' - '.join(col.format(x) for x in numeric_key) 31 32 class RangeColumn(Column): 33 def format(self, typedval): 34 if typedval is None: 35 return None 36 return formatRange(self.origcol, typedval) 37 38 class PivotSheet(Sheet): 39 'Summarize key columns in pivot table and display as new sheet.' 40 rowtype = 'grouped rows' # rowdef: PivotGroupRow 41 def __init__(self, name, groupByCols, pivotCols, **kwargs): 42 super().__init__(name, **kwargs) 43 44 self.pivotCols = pivotCols # whose values become columns 45 self.groupByCols = groupByCols # whose values become rows 46 47 def isNumericRange(self, col): 48 return isNumeric(col) and self.source.options.numeric_binning 49 50 def initCols(self): 51 self.columns = [] 52 53 # add key columns (grouped by) 54 for colnum, c in enumerate(self.groupByCols): 55 if c in self.pivotCols: 56 continue 57 58 if self.isNumericRange(c): 59 newcol = RangeColumn(c.name, origcol=c, width=c.width and c.width*2, getter=lambda c,r: r.numeric_key) 60 else: 61 newcol = Column(c.name, width=c.width, fmtstr=c.fmtstr, 62 type=c.type if c.type in vd.typemap else anytype, 63 origcol=c, 64 getter=lambda col,row,i=colnum: row.discrete_keys[i], 65 setter=lambda col,row,val,i=colnum: setitem(row.discrete_keys, i, val) and col.origcol.setValues(row.sourcerows, val)) 66 67 self.addColumn(newcol) 68 69 self.setKeys(self.columns) 70 71 def openRow(self, row): 72 'open sheet of source rows aggregated in current pivot row' 73 vs = copy(self.source) 74 vs.name += "_%s"%"+".join(row.discrete_keys) 75 vs.rows = sum(row.pivotrows.values(), []) 76 return vs 77 78 def openCell(self, col, row): 79 'open sheet of source rows aggregated in current pivot cell' 80 vs = copy(self.source) 81 vs.name += "_%s"%col.aggvalue 82 vs.rows = row.pivotrows.get(col.aggvalue, []) 83 return vs 84 85 def reload(self): 86 self.initCols() 87 88 # two different threads for better interactive display 89 self.addAggregateCols() 90 self.groupRows() 91 92 @asyncthread 93 def addAggregateCols(self): 94 # add aggregated columns 95 aggcols = { # [Column] -> list(aggregators) 96 sourcecol: sourcecol.aggregators 97 for sourcecol in self.source.visibleCols 98 if hasattr(sourcecol, 'aggregators') 99 } or { # if pivot given but no aggregators specified 100 sourcecol: [vd.aggregators["count"]] 101 for sourcecol in self.pivotCols 102 } 103 104 if not aggcols: 105 # self.addColumn(ColumnAttr('count', 'sourcerows', type=vlen)) 106 return 107 108 # aggregators without pivot 109 if not self.pivotCols: 110 for aggcol, aggregatorlist in aggcols.items(): 111 for aggregator in aggregatorlist: 112 aggname = '%s_%s' % (aggcol.name, aggregator.name) 113 114 c = Column(aggname, 115 type=aggregator.type or aggcol.type, 116 getter=lambda col,row,aggcol=aggcol,agg=aggregator: agg(aggcol, row.sourcerows)) 117 self.addColumn(c) 118 119 # add pivoted columns 120 for pivotcol in self.pivotCols: 121 allValues = set() 122 for value in Progress(pivotcol.getValues(self.source.rows), 'pivoting', total=len(self.source.rows)): 123 if value in allValues: 124 continue 125 allValues.add(value) 126 127 if len(self.pivotCols) > 1: 128 valname = '%s_%s' % (pivotcol.name, value) 129 else: 130 valname = str(value) 131 132 for aggcol, aggregatorlist in aggcols.items(): 133 for aggregator in aggregatorlist: 134 if len(aggcols) > 1: # if more than one aggregated column, include that column name in the new column name 135 aggname = '%s_%s' % (aggcol.name, aggregator.name) 136 else: 137 aggname = aggregator.name 138 139 140 if len(aggregatorlist) > 1 or len(aggcols) > 1: 141 colname = '%s_%s' % (aggname, valname) 142 if not self.name: 143 self.name = self.source.name+'_pivot_'+''.join(c.name for c in self.pivotCols) 144 else: 145 colname = valname 146 if not self.name: 147 self.name = self.source.name+'_pivot_'+''.join(c.name for c in self.pivotCols) + '_' + aggname 148 149 c = Column(colname, 150 type=aggregator.type or aggcol.type, 151 aggvalue=value, 152 getter=lambda col,row,aggcol=aggcol,agg=aggregator: agg(aggcol, row.pivotrows.get(col.aggvalue, []))) 153 self.addColumn(c) 154 155 # if aggregator.name != 'count': # already have count above 156 # c = Column('Total_' + aggcol.name, 157 # type=aggregator.type or aggcol.type, 158 # getter=lambda col,row,aggcol=aggcol,agg=aggregator: agg(aggcol, row.sourcerows)) 159 # self.addColumn(c) 160 161 @asyncthread 162 def groupRows(self, rowfunc=None): 163 self.rows = [] 164 165 discreteCols = [c for c in self.groupByCols if not self.isNumericRange(c)] 166 167 numericCols = [c for c in self.groupByCols if self.isNumericRange(c)] 168 169 if len(numericCols) > 1: 170 vd.fail('only one numeric column can be binned') 171 172 numericBins = [] 173 degenerateBinning = False 174 if numericCols: 175 nbins = options.histogram_bins or int(len(self.source.rows) ** (1./2)) 176 vals = tuple(numericCols[0].getValues(self.source.rows)) 177 minval = min(vals) 178 maxval = max(vals) 179 width = (maxval - minval)/nbins 180 181 if width == 0: 182 # only one value (and maybe errors) 183 numericBins = [(minval, maxval)] 184 elif numericCols[0].type in (int, vlen) and nbins > (maxval - minval): 185 # more bins than int vals, just use the vals 186 degenerateBinning = True 187 numericBins = [(minval+i, minval+i) for i in range(maxval-minval+1)] 188 else: 189 numericBins = [(minval+width*i, minval+width*(i+1)) for i in range(nbins)] 190 191 # group rows by their keys (groupByCols), and separate by their pivot values (pivotCols) 192 groups = {} # [formattedDiscreteKeys] -> (numericGroupRows:dict(formattedNumericKeyRange -> PivotGroupRow), groupRow:PivotGroupRow) # groupRow is main/error row 193 194 for sourcerow in Progress(self.source.iterrows(), 'grouping', total=self.source.nRows): 195 discreteKeys = list(forward(origcol.getTypedValue(sourcerow)) for origcol in discreteCols) 196 197 # wrapply will pass-through a key-able TypedWrapper 198 formattedDiscreteKeys = tuple(wrapply(c.format, v) for v, c in zip(discreteKeys, discreteCols)) 199 200 numericGroupRows, groupRow = groups.get(formattedDiscreteKeys, (None, None)) 201 if numericGroupRows is None: 202 # add new group rows 203 numericGroupRows = {formatRange(numericCols[0], numRange): PivotGroupRow(discreteKeys, numRange, [], {}) for numRange in numericBins} 204 groups[formattedDiscreteKeys] = (numericGroupRows, None) 205 for r in numericGroupRows.values(): 206 self.addRow(r) 207 208 # find the grouprow this sourcerow belongs in, by numericbin 209 if numericCols: 210 try: 211 val = numericCols[0].getValue(sourcerow) 212 if val is not None: 213 val = numericCols[0].type(val) 214 if not width: 215 binidx = 0 216 elif degenerateBinning: 217 binidx = val-minval 218 else: 219 binidx = int((val-minval)//width) 220 groupRow = numericGroupRows[formatRange(numericCols[0], numericBins[min(binidx, nbins-1)])] 221 except Exception as e: 222 # leave in main/error bin 223 pass 224 225 # add the main bin if no numeric bin (error, or no numeric cols) 226 if groupRow is None: 227 nankey = makeErrorKey(numericCols[0]) if numericCols else 0 228 groupRow = PivotGroupRow(discreteKeys, (nankey, nankey), [], {}) 229 groups[formattedDiscreteKeys] = (numericGroupRows, groupRow) 230 self.addRow(groupRow) 231 232 # add the sourcerow to its all bin 233 groupRow.sourcerows.append(sourcerow) 234 235 # separate by pivot value 236 for col in self.pivotCols: 237 varval = col.getTypedValue(sourcerow) 238 matchingRows = groupRow.pivotrows.get(varval) 239 if matchingRows is None: 240 matchingRows = groupRow.pivotrows[varval] = [] 241 matchingRows.append(sourcerow) 242 243 if rowfunc: 244 rowfunc(groupRow) 245 246 # automatically add cache to all columns now that everything is binned 247 for c in self.nonKeyVisibleCols: 248 c.setCache(True) 249 250 251 Sheet.addCommand('W', 'pivot', 'vd.push(Pivot(sheet, keyCols, [cursorCol]))', 'open Pivot Table: group rows by key column and summarize current column') 252 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/visidata/pivot.py b/visidata/pivot.py --- a/visidata/pivot.py +++ b/visidata/pivot.py @@ -181,10 +181,11 @@ if width == 0: # only one value (and maybe errors) numericBins = [(minval, maxval)] - elif numericCols[0].type in (int, vlen) and nbins > (maxval - minval): - # more bins than int vals, just use the vals + elif (numericCols[0].type in (int, vlen) and nbins > (maxval - minval)) or (width == 1): + # (more bins than int vals) or (if bins are of width 1), just use the vals as bins degenerateBinning = True - numericBins = [(minval+i, minval+i) for i in range(maxval-minval+1)] + numericBins = [(val, val) for val in sorted(set(vals))] + nbins = len(numericBins) else: numericBins = [(minval+width*i, minval+width*(i+1)) for i in range(nbins)] @@ -214,7 +215,8 @@ if not width: binidx = 0 elif degenerateBinning: - binidx = val-minval + # in degenerate binning, each val has its own bin + binidx = numericBins.index((val, val)) else: binidx = int((val-minval)//width) groupRow = numericGroupRows[formatRange(numericCols[0], numericBins[min(binidx, nbins-1)])]
{"golden_diff": "diff --git a/visidata/pivot.py b/visidata/pivot.py\n--- a/visidata/pivot.py\n+++ b/visidata/pivot.py\n@@ -181,10 +181,11 @@\n if width == 0:\n # only one value (and maybe errors)\n numericBins = [(minval, maxval)]\n- elif numericCols[0].type in (int, vlen) and nbins > (maxval - minval):\n- # more bins than int vals, just use the vals\n+ elif (numericCols[0].type in (int, vlen) and nbins > (maxval - minval)) or (width == 1):\n+ # (more bins than int vals) or (if bins are of width 1), just use the vals as bins\n degenerateBinning = True\n- numericBins = [(minval+i, minval+i) for i in range(maxval-minval+1)]\n+ numericBins = [(val, val) for val in sorted(set(vals))]\n+ nbins = len(numericBins)\n else:\n numericBins = [(minval+width*i, minval+width*(i+1)) for i in range(nbins)]\n \n@@ -214,7 +215,8 @@\n if not width:\n binidx = 0\n elif degenerateBinning:\n- binidx = val-minval\n+ # in degenerate binning, each val has its own bin\n+ binidx = numericBins.index((val, val))\n else:\n binidx = int((val-minval)//width)\n groupRow = numericGroupRows[formatRange(numericCols[0], numericBins[min(binidx, nbins-1)])]\n", "issue": "frequency histogram for number is wrong\n**Small description**\r\n\r\nWith [this data](https://github.com/saulpw/visidata/files/5543325/a.txt), when type casting year column to number `#`, and asking for frequency `F`, bins are wrong. I works correctly when not type casting.\r\n\r\n**Expected result**\r\n\r\n(obtained without type casting and moving 2020 to the top)\r\n\r\n```\r\nyear\tcount\tpercent\thistogram\r\n2020\t11\t7.69\t*********************************************\r\n2019\t12\t8.39\t**************************************************\r\n2018\t12\t8.39\t**************************************************\r\n2017\t12\t8.39\t**************************************************\r\n2016\t12\t8.39\t**************************************************\r\n2015\t12\t8.39\t**************************************************\r\n2014\t12\t8.39\t**************************************************\r\n2013\t12\t8.39\t**************************************************\r\n2012\t12\t8.39\t**************************************************\r\n2011\t12\t8.39\t**************************************************\r\n2010\t12\t8.39\t**************************************************\r\n2009\t12\t8.39\t**************************************************\r\n```\r\n\r\n\r\n**Actual result with screenshot**\r\n\r\n(typecasting to number)\r\n\r\n```\r\nyear\tcount\tpercent\thistogram\r\n2009\t12\t8.39\t**************************\r\n2010\t12\t8.39\t**************************\r\n2011\t12\t8.39\t**************************\r\n2012\t12\t8.39\t**************************\r\n2013\t12\t8.39\t**************************\r\n2014\t12\t8.39\t**************************\r\n2015\t12\t8.39\t**************************\r\n2016\t12\t8.39\t**************************\r\n2017\t12\t8.39\t**************************\r\n2018\t12\t8.39\t**************************\r\n2019\t23\t16.08\t**************************************************\r\n2020\t0\t0.00\t\r\n```\r\n\r\n\r\n**Steps to reproduce with sample data and a .vd**\r\n\r\n* [data](https://github.com/saulpw/visidata/files/5543325/a.txt)\r\n* [commands](https://github.com/saulpw/visidata/files/5543339/a.vd.txt)\r\n\r\n**Additional context**\r\n\r\nversion `saul.pw/VisiData v2.0.1`\r\n\r\nit seems there is a limit to 11 bins and the rest is empty. The more I put data, the more is aggregated in the 11th bin.\r\n\n", "before_files": [{"content": "import collections\nfrom visidata import *\n\n\n# discrete_keys = tuple of formatted discrete keys that group the row\n# numeric_key is a range\n# sourcerows is list(all source.rows in group)\n# pivotrows is { pivot_values: list(source.rows in group with pivot_values) }\nPivotGroupRow = collections.namedtuple('PivotGroupRow', 'discrete_keys numeric_key sourcerows pivotrows'.split())\n\ndef Pivot(source, groupByCols, pivotCols):\n return PivotSheet('',\n groupByCols,\n pivotCols,\n source=source)\n\ndef makeErrorKey(col):\n if col.type is date:\n return date.min # date('2000-01-01')\n else:\n return col.type()\n\ndef formatRange(col, numeric_key):\n a, b = numeric_key\n nankey = makeErrorKey(col)\n if a is nankey and b is nankey:\n return '#ERR'\n elif a == b:\n return col.format(a)\n return ' - '.join(col.format(x) for x in numeric_key)\n\nclass RangeColumn(Column):\n def format(self, typedval):\n if typedval is None:\n return None\n return formatRange(self.origcol, typedval)\n\nclass PivotSheet(Sheet):\n 'Summarize key columns in pivot table and display as new sheet.'\n rowtype = 'grouped rows' # rowdef: PivotGroupRow\n def __init__(self, name, groupByCols, pivotCols, **kwargs):\n super().__init__(name, **kwargs)\n\n self.pivotCols = pivotCols # whose values become columns\n self.groupByCols = groupByCols # whose values become rows\n\n def isNumericRange(self, col):\n return isNumeric(col) and self.source.options.numeric_binning\n\n def initCols(self):\n self.columns = []\n\n # add key columns (grouped by)\n for colnum, c in enumerate(self.groupByCols):\n if c in self.pivotCols:\n continue\n\n if self.isNumericRange(c):\n newcol = RangeColumn(c.name, origcol=c, width=c.width and c.width*2, getter=lambda c,r: r.numeric_key)\n else:\n newcol = Column(c.name, width=c.width, fmtstr=c.fmtstr,\n type=c.type if c.type in vd.typemap else anytype,\n origcol=c,\n getter=lambda col,row,i=colnum: row.discrete_keys[i],\n setter=lambda col,row,val,i=colnum: setitem(row.discrete_keys, i, val) and col.origcol.setValues(row.sourcerows, val))\n\n self.addColumn(newcol)\n\n self.setKeys(self.columns)\n\n def openRow(self, row):\n 'open sheet of source rows aggregated in current pivot row'\n vs = copy(self.source)\n vs.name += \"_%s\"%\"+\".join(row.discrete_keys)\n vs.rows = sum(row.pivotrows.values(), [])\n return vs\n\n def openCell(self, col, row):\n 'open sheet of source rows aggregated in current pivot cell'\n vs = copy(self.source)\n vs.name += \"_%s\"%col.aggvalue\n vs.rows = row.pivotrows.get(col.aggvalue, [])\n return vs\n\n def reload(self):\n self.initCols()\n\n # two different threads for better interactive display\n self.addAggregateCols()\n self.groupRows()\n\n @asyncthread\n def addAggregateCols(self):\n # add aggregated columns\n aggcols = { # [Column] -> list(aggregators)\n sourcecol: sourcecol.aggregators\n for sourcecol in self.source.visibleCols\n if hasattr(sourcecol, 'aggregators')\n } or { # if pivot given but no aggregators specified\n sourcecol: [vd.aggregators[\"count\"]]\n for sourcecol in self.pivotCols\n }\n\n if not aggcols:\n# self.addColumn(ColumnAttr('count', 'sourcerows', type=vlen))\n return\n\n # aggregators without pivot\n if not self.pivotCols:\n for aggcol, aggregatorlist in aggcols.items():\n for aggregator in aggregatorlist:\n aggname = '%s_%s' % (aggcol.name, aggregator.name)\n\n c = Column(aggname,\n type=aggregator.type or aggcol.type,\n getter=lambda col,row,aggcol=aggcol,agg=aggregator: agg(aggcol, row.sourcerows))\n self.addColumn(c)\n\n # add pivoted columns\n for pivotcol in self.pivotCols:\n allValues = set()\n for value in Progress(pivotcol.getValues(self.source.rows), 'pivoting', total=len(self.source.rows)):\n if value in allValues:\n continue\n allValues.add(value)\n\n if len(self.pivotCols) > 1:\n valname = '%s_%s' % (pivotcol.name, value)\n else:\n valname = str(value)\n\n for aggcol, aggregatorlist in aggcols.items():\n for aggregator in aggregatorlist:\n if len(aggcols) > 1: # if more than one aggregated column, include that column name in the new column name\n aggname = '%s_%s' % (aggcol.name, aggregator.name)\n else:\n aggname = aggregator.name\n\n\n if len(aggregatorlist) > 1 or len(aggcols) > 1:\n colname = '%s_%s' % (aggname, valname)\n if not self.name:\n self.name = self.source.name+'_pivot_'+''.join(c.name for c in self.pivotCols)\n else:\n colname = valname\n if not self.name:\n self.name = self.source.name+'_pivot_'+''.join(c.name for c in self.pivotCols) + '_' + aggname\n\n c = Column(colname,\n type=aggregator.type or aggcol.type,\n aggvalue=value,\n getter=lambda col,row,aggcol=aggcol,agg=aggregator: agg(aggcol, row.pivotrows.get(col.aggvalue, [])))\n self.addColumn(c)\n\n# if aggregator.name != 'count': # already have count above\n# c = Column('Total_' + aggcol.name,\n# type=aggregator.type or aggcol.type,\n# getter=lambda col,row,aggcol=aggcol,agg=aggregator: agg(aggcol, row.sourcerows))\n# self.addColumn(c)\n\n @asyncthread\n def groupRows(self, rowfunc=None):\n self.rows = []\n\n discreteCols = [c for c in self.groupByCols if not self.isNumericRange(c)]\n\n numericCols = [c for c in self.groupByCols if self.isNumericRange(c)]\n\n if len(numericCols) > 1:\n vd.fail('only one numeric column can be binned')\n\n numericBins = []\n degenerateBinning = False\n if numericCols:\n nbins = options.histogram_bins or int(len(self.source.rows) ** (1./2))\n vals = tuple(numericCols[0].getValues(self.source.rows))\n minval = min(vals)\n maxval = max(vals)\n width = (maxval - minval)/nbins\n\n if width == 0:\n # only one value (and maybe errors)\n numericBins = [(minval, maxval)]\n elif numericCols[0].type in (int, vlen) and nbins > (maxval - minval):\n # more bins than int vals, just use the vals\n degenerateBinning = True\n numericBins = [(minval+i, minval+i) for i in range(maxval-minval+1)]\n else:\n numericBins = [(minval+width*i, minval+width*(i+1)) for i in range(nbins)]\n\n # group rows by their keys (groupByCols), and separate by their pivot values (pivotCols)\n groups = {} # [formattedDiscreteKeys] -> (numericGroupRows:dict(formattedNumericKeyRange -> PivotGroupRow), groupRow:PivotGroupRow) # groupRow is main/error row\n\n for sourcerow in Progress(self.source.iterrows(), 'grouping', total=self.source.nRows):\n discreteKeys = list(forward(origcol.getTypedValue(sourcerow)) for origcol in discreteCols)\n\n # wrapply will pass-through a key-able TypedWrapper\n formattedDiscreteKeys = tuple(wrapply(c.format, v) for v, c in zip(discreteKeys, discreteCols))\n\n numericGroupRows, groupRow = groups.get(formattedDiscreteKeys, (None, None))\n if numericGroupRows is None:\n # add new group rows\n numericGroupRows = {formatRange(numericCols[0], numRange): PivotGroupRow(discreteKeys, numRange, [], {}) for numRange in numericBins}\n groups[formattedDiscreteKeys] = (numericGroupRows, None)\n for r in numericGroupRows.values():\n self.addRow(r)\n\n # find the grouprow this sourcerow belongs in, by numericbin\n if numericCols:\n try:\n val = numericCols[0].getValue(sourcerow)\n if val is not None:\n val = numericCols[0].type(val)\n if not width:\n binidx = 0\n elif degenerateBinning:\n binidx = val-minval\n else:\n binidx = int((val-minval)//width)\n groupRow = numericGroupRows[formatRange(numericCols[0], numericBins[min(binidx, nbins-1)])]\n except Exception as e:\n # leave in main/error bin\n pass\n\n # add the main bin if no numeric bin (error, or no numeric cols)\n if groupRow is None:\n nankey = makeErrorKey(numericCols[0]) if numericCols else 0\n groupRow = PivotGroupRow(discreteKeys, (nankey, nankey), [], {})\n groups[formattedDiscreteKeys] = (numericGroupRows, groupRow)\n self.addRow(groupRow)\n\n # add the sourcerow to its all bin\n groupRow.sourcerows.append(sourcerow)\n\n # separate by pivot value\n for col in self.pivotCols:\n varval = col.getTypedValue(sourcerow)\n matchingRows = groupRow.pivotrows.get(varval)\n if matchingRows is None:\n matchingRows = groupRow.pivotrows[varval] = []\n matchingRows.append(sourcerow)\n\n if rowfunc:\n rowfunc(groupRow)\n\n # automatically add cache to all columns now that everything is binned\n for c in self.nonKeyVisibleCols:\n c.setCache(True)\n\n\nSheet.addCommand('W', 'pivot', 'vd.push(Pivot(sheet, keyCols, [cursorCol]))', 'open Pivot Table: group rows by key column and summarize current column')\n", "path": "visidata/pivot.py"}], "after_files": [{"content": "import collections\nfrom visidata import *\n\n\n# discrete_keys = tuple of formatted discrete keys that group the row\n# numeric_key is a range\n# sourcerows is list(all source.rows in group)\n# pivotrows is { pivot_values: list(source.rows in group with pivot_values) }\nPivotGroupRow = collections.namedtuple('PivotGroupRow', 'discrete_keys numeric_key sourcerows pivotrows'.split())\n\ndef Pivot(source, groupByCols, pivotCols):\n return PivotSheet('',\n groupByCols,\n pivotCols,\n source=source)\n\ndef makeErrorKey(col):\n if col.type is date:\n return date.min # date('2000-01-01')\n else:\n return col.type()\n\ndef formatRange(col, numeric_key):\n a, b = numeric_key\n nankey = makeErrorKey(col)\n if a is nankey and b is nankey:\n return '#ERR'\n elif a == b:\n return col.format(a)\n return ' - '.join(col.format(x) for x in numeric_key)\n\nclass RangeColumn(Column):\n def format(self, typedval):\n if typedval is None:\n return None\n return formatRange(self.origcol, typedval)\n\nclass PivotSheet(Sheet):\n 'Summarize key columns in pivot table and display as new sheet.'\n rowtype = 'grouped rows' # rowdef: PivotGroupRow\n def __init__(self, name, groupByCols, pivotCols, **kwargs):\n super().__init__(name, **kwargs)\n\n self.pivotCols = pivotCols # whose values become columns\n self.groupByCols = groupByCols # whose values become rows\n\n def isNumericRange(self, col):\n return isNumeric(col) and self.source.options.numeric_binning\n\n def initCols(self):\n self.columns = []\n\n # add key columns (grouped by)\n for colnum, c in enumerate(self.groupByCols):\n if c in self.pivotCols:\n continue\n\n if self.isNumericRange(c):\n newcol = RangeColumn(c.name, origcol=c, width=c.width and c.width*2, getter=lambda c,r: r.numeric_key)\n else:\n newcol = Column(c.name, width=c.width, fmtstr=c.fmtstr,\n type=c.type if c.type in vd.typemap else anytype,\n origcol=c,\n getter=lambda col,row,i=colnum: row.discrete_keys[i],\n setter=lambda col,row,val,i=colnum: setitem(row.discrete_keys, i, val) and col.origcol.setValues(row.sourcerows, val))\n\n self.addColumn(newcol)\n\n self.setKeys(self.columns)\n\n def openRow(self, row):\n 'open sheet of source rows aggregated in current pivot row'\n vs = copy(self.source)\n vs.name += \"_%s\"%\"+\".join(row.discrete_keys)\n vs.rows = sum(row.pivotrows.values(), [])\n return vs\n\n def openCell(self, col, row):\n 'open sheet of source rows aggregated in current pivot cell'\n vs = copy(self.source)\n vs.name += \"_%s\"%col.aggvalue\n vs.rows = row.pivotrows.get(col.aggvalue, [])\n return vs\n\n def reload(self):\n self.initCols()\n\n # two different threads for better interactive display\n self.addAggregateCols()\n self.groupRows()\n\n @asyncthread\n def addAggregateCols(self):\n # add aggregated columns\n aggcols = { # [Column] -> list(aggregators)\n sourcecol: sourcecol.aggregators\n for sourcecol in self.source.visibleCols\n if hasattr(sourcecol, 'aggregators')\n } or { # if pivot given but no aggregators specified\n sourcecol: [vd.aggregators[\"count\"]]\n for sourcecol in self.pivotCols\n }\n\n if not aggcols:\n# self.addColumn(ColumnAttr('count', 'sourcerows', type=vlen))\n return\n\n # aggregators without pivot\n if not self.pivotCols:\n for aggcol, aggregatorlist in aggcols.items():\n for aggregator in aggregatorlist:\n aggname = '%s_%s' % (aggcol.name, aggregator.name)\n\n c = Column(aggname,\n type=aggregator.type or aggcol.type,\n getter=lambda col,row,aggcol=aggcol,agg=aggregator: agg(aggcol, row.sourcerows))\n self.addColumn(c)\n\n # add pivoted columns\n for pivotcol in self.pivotCols:\n allValues = set()\n for value in Progress(pivotcol.getValues(self.source.rows), 'pivoting', total=len(self.source.rows)):\n if value in allValues:\n continue\n allValues.add(value)\n\n if len(self.pivotCols) > 1:\n valname = '%s_%s' % (pivotcol.name, value)\n else:\n valname = str(value)\n\n for aggcol, aggregatorlist in aggcols.items():\n for aggregator in aggregatorlist:\n if len(aggcols) > 1: # if more than one aggregated column, include that column name in the new column name\n aggname = '%s_%s' % (aggcol.name, aggregator.name)\n else:\n aggname = aggregator.name\n\n\n if len(aggregatorlist) > 1 or len(aggcols) > 1:\n colname = '%s_%s' % (aggname, valname)\n if not self.name:\n self.name = self.source.name+'_pivot_'+''.join(c.name for c in self.pivotCols)\n else:\n colname = valname\n if not self.name:\n self.name = self.source.name+'_pivot_'+''.join(c.name for c in self.pivotCols) + '_' + aggname\n\n c = Column(colname,\n type=aggregator.type or aggcol.type,\n aggvalue=value,\n getter=lambda col,row,aggcol=aggcol,agg=aggregator: agg(aggcol, row.pivotrows.get(col.aggvalue, [])))\n self.addColumn(c)\n\n# if aggregator.name != 'count': # already have count above\n# c = Column('Total_' + aggcol.name,\n# type=aggregator.type or aggcol.type,\n# getter=lambda col,row,aggcol=aggcol,agg=aggregator: agg(aggcol, row.sourcerows))\n# self.addColumn(c)\n\n @asyncthread\n def groupRows(self, rowfunc=None):\n self.rows = []\n\n discreteCols = [c for c in self.groupByCols if not self.isNumericRange(c)]\n\n numericCols = [c for c in self.groupByCols if self.isNumericRange(c)]\n\n if len(numericCols) > 1:\n vd.fail('only one numeric column can be binned')\n\n numericBins = []\n degenerateBinning = False\n if numericCols:\n nbins = options.histogram_bins or int(len(self.source.rows) ** (1./2))\n vals = tuple(numericCols[0].getValues(self.source.rows))\n minval = min(vals)\n maxval = max(vals)\n width = (maxval - minval)/nbins\n\n if width == 0:\n # only one value (and maybe errors)\n numericBins = [(minval, maxval)]\n elif (numericCols[0].type in (int, vlen) and nbins > (maxval - minval)) or (width == 1):\n # (more bins than int vals) or (if bins are of width 1), just use the vals as bins\n degenerateBinning = True\n numericBins = [(val, val) for val in sorted(set(vals))]\n nbins = len(numericBins)\n else:\n numericBins = [(minval+width*i, minval+width*(i+1)) for i in range(nbins)]\n\n # group rows by their keys (groupByCols), and separate by their pivot values (pivotCols)\n groups = {} # [formattedDiscreteKeys] -> (numericGroupRows:dict(formattedNumericKeyRange -> PivotGroupRow), groupRow:PivotGroupRow) # groupRow is main/error row\n\n for sourcerow in Progress(self.source.iterrows(), 'grouping', total=self.source.nRows):\n discreteKeys = list(forward(origcol.getTypedValue(sourcerow)) for origcol in discreteCols)\n\n # wrapply will pass-through a key-able TypedWrapper\n formattedDiscreteKeys = tuple(wrapply(c.format, v) for v, c in zip(discreteKeys, discreteCols))\n\n numericGroupRows, groupRow = groups.get(formattedDiscreteKeys, (None, None))\n if numericGroupRows is None:\n # add new group rows\n numericGroupRows = {formatRange(numericCols[0], numRange): PivotGroupRow(discreteKeys, numRange, [], {}) for numRange in numericBins}\n groups[formattedDiscreteKeys] = (numericGroupRows, None)\n for r in numericGroupRows.values():\n self.addRow(r)\n\n # find the grouprow this sourcerow belongs in, by numericbin\n if numericCols:\n try:\n val = numericCols[0].getValue(sourcerow)\n if val is not None:\n val = numericCols[0].type(val)\n if not width:\n binidx = 0\n elif degenerateBinning:\n # in degenerate binning, each val has its own bin\n binidx = numericBins.index((val, val))\n else:\n binidx = int((val-minval)//width)\n groupRow = numericGroupRows[formatRange(numericCols[0], numericBins[min(binidx, nbins-1)])]\n except Exception as e:\n # leave in main/error bin\n pass\n\n # add the main bin if no numeric bin (error, or no numeric cols)\n if groupRow is None:\n nankey = makeErrorKey(numericCols[0]) if numericCols else 0\n groupRow = PivotGroupRow(discreteKeys, (nankey, nankey), [], {})\n groups[formattedDiscreteKeys] = (numericGroupRows, groupRow)\n self.addRow(groupRow)\n\n # add the sourcerow to its all bin\n groupRow.sourcerows.append(sourcerow)\n\n # separate by pivot value\n for col in self.pivotCols:\n varval = col.getTypedValue(sourcerow)\n matchingRows = groupRow.pivotrows.get(varval)\n if matchingRows is None:\n matchingRows = groupRow.pivotrows[varval] = []\n matchingRows.append(sourcerow)\n\n if rowfunc:\n rowfunc(groupRow)\n\n # automatically add cache to all columns now that everything is binned\n for c in self.nonKeyVisibleCols:\n c.setCache(True)\n\n\nSheet.addCommand('W', 'pivot', 'vd.push(Pivot(sheet, keyCols, [cursorCol]))', 'open Pivot Table: group rows by key column and summarize current column')\n", "path": "visidata/pivot.py"}]}
3,910
388
gh_patches_debug_22003
rasdani/github-patches
git_diff
flairNLP__flair-1372
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Plotter throws error if test loss is not monitored The Plotter method to plot training curves currently throws an error ( `ValueError: 'TEST_LOSS' is not in list`) if test loss is not monitored during training. To reproduce: ```python trainer: ModelTrainer = ModelTrainer(tagger, corpus) trainer.train( "resources/taggers/example-ner", max_epochs=1, ) plotter = Plotter() plotter.plot_training_curves("resources/taggers/example-ner/loss.tsv") ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `flair/visual/training_curves.py` Content: ``` 1 import logging 2 from collections import defaultdict 3 from pathlib import Path 4 from typing import Union, List 5 6 import numpy as np 7 import csv 8 9 import math 10 11 import matplotlib.pyplot as plt 12 13 14 # header for 'weights.txt' 15 WEIGHT_NAME = 1 16 WEIGHT_NUMBER = 2 17 WEIGHT_VALUE = 3 18 19 log = logging.getLogger("flair") 20 21 22 class Plotter(object): 23 """ 24 Plots training parameters (loss, f-score, and accuracy) and training weights over time. 25 Input files are the output files 'loss.tsv' and 'weights.txt' from training either a sequence tagger or text 26 classification model. 27 """ 28 29 @staticmethod 30 def _extract_evaluation_data(file_name: Path, score: str = "F1") -> dict: 31 training_curves = { 32 "train": {"loss": [], "score": []}, 33 "test": {"loss": [], "score": []}, 34 "dev": {"loss": [], "score": []}, 35 } 36 37 with open(file_name, "r") as tsvin: 38 tsvin = csv.reader(tsvin, delimiter="\t") 39 40 # determine the column index of loss, f-score and accuracy for train, dev and test split 41 row = next(tsvin, None) 42 43 score = score.upper() 44 45 if f"TEST_{score}" not in row: 46 log.warning("-" * 100) 47 log.warning(f"WARNING: No {score} found for test split in this data.") 48 log.warning( 49 f"Are you sure you want to plot {score} and not another value?" 50 ) 51 log.warning("-" * 100) 52 53 TRAIN_SCORE = ( 54 row.index(f"TRAIN_{score}") if f"TRAIN_{score}" in row else None 55 ) 56 DEV_SCORE = row.index(f"DEV_{score}") if f"DEV_{score}" in row else None 57 TEST_SCORE = row.index(f"TEST_{score}") 58 59 # then get all relevant values from the tsv 60 for row in tsvin: 61 62 if TRAIN_SCORE is not None: 63 if row[TRAIN_SCORE] != "_": 64 training_curves["train"]["score"].append( 65 float(row[TRAIN_SCORE]) 66 ) 67 68 if DEV_SCORE is not None: 69 if row[DEV_SCORE] != "_": 70 training_curves["dev"]["score"].append(float(row[DEV_SCORE])) 71 72 if row[TEST_SCORE] != "_": 73 training_curves["test"]["score"].append(float(row[TEST_SCORE])) 74 75 return training_curves 76 77 @staticmethod 78 def _extract_weight_data(file_name: Path) -> dict: 79 weights = defaultdict(lambda: defaultdict(lambda: list())) 80 81 with open(file_name, "r") as tsvin: 82 tsvin = csv.reader(tsvin, delimiter="\t") 83 84 for row in tsvin: 85 name = row[WEIGHT_NAME] 86 param = row[WEIGHT_NUMBER] 87 value = float(row[WEIGHT_VALUE]) 88 89 weights[name][param].append(value) 90 91 return weights 92 93 @staticmethod 94 def _extract_learning_rate(file_name: Path): 95 lrs = [] 96 losses = [] 97 98 with open(file_name, "r") as tsvin: 99 tsvin = csv.reader(tsvin, delimiter="\t") 100 row = next(tsvin, None) 101 LEARNING_RATE = row.index("LEARNING_RATE") 102 TRAIN_LOSS = row.index("TRAIN_LOSS") 103 104 # then get all relevant values from the tsv 105 for row in tsvin: 106 if row[TRAIN_LOSS] != "_": 107 losses.append(float(row[TRAIN_LOSS])) 108 if row[LEARNING_RATE] != "_": 109 lrs.append(float(row[LEARNING_RATE])) 110 111 return lrs, losses 112 113 def plot_weights(self, file_name: Union[str, Path]): 114 if type(file_name) is str: 115 file_name = Path(file_name) 116 117 weights = self._extract_weight_data(file_name) 118 119 total = len(weights) 120 columns = 2 121 rows = max(2, int(math.ceil(total / columns))) 122 # print(rows) 123 124 # figsize = (16, 16) 125 if rows != columns: 126 figsize = (8, rows + 0) 127 128 fig = plt.figure() 129 f, axarr = plt.subplots(rows, columns, figsize=figsize) 130 131 c = 0 132 r = 0 133 for name, values in weights.items(): 134 # plot i 135 axarr[r, c].set_title(name, fontsize=6) 136 for _, v in values.items(): 137 axarr[r, c].plot(np.arange(0, len(v)), v, linewidth=0.35) 138 axarr[r, c].set_yticks([]) 139 axarr[r, c].set_xticks([]) 140 c += 1 141 if c == columns: 142 c = 0 143 r += 1 144 145 while r != rows and c != columns: 146 axarr[r, c].set_yticks([]) 147 axarr[r, c].set_xticks([]) 148 c += 1 149 if c == columns: 150 c = 0 151 r += 1 152 153 # save plots 154 f.subplots_adjust(hspace=0.5) 155 plt.tight_layout(pad=1.0) 156 path = file_name.parent / "weights.png" 157 plt.savefig(path, dpi=300) 158 print( 159 f"Weights plots are saved in {path}" 160 ) # to let user know the path of the save plots 161 plt.close(fig) 162 163 def plot_training_curves( 164 self, file_name: Union[str, Path], plot_values: List[str] = ["loss", "F1"] 165 ): 166 if type(file_name) is str: 167 file_name = Path(file_name) 168 169 fig = plt.figure(figsize=(15, 10)) 170 171 for plot_no, plot_value in enumerate(plot_values): 172 173 training_curves = self._extract_evaluation_data(file_name, plot_value) 174 175 plt.subplot(len(plot_values), 1, plot_no + 1) 176 if training_curves["train"]["score"]: 177 x = np.arange(0, len(training_curves["train"]["score"])) 178 plt.plot( 179 x, training_curves["train"]["score"], label=f"training {plot_value}" 180 ) 181 if training_curves["dev"]["score"]: 182 x = np.arange(0, len(training_curves["dev"]["score"])) 183 plt.plot( 184 x, training_curves["dev"]["score"], label=f"validation {plot_value}" 185 ) 186 if training_curves["test"]["score"]: 187 x = np.arange(0, len(training_curves["test"]["score"])) 188 plt.plot( 189 x, training_curves["test"]["score"], label=f"test {plot_value}" 190 ) 191 plt.legend(bbox_to_anchor=(1.04, 0), loc="lower left", borderaxespad=0) 192 plt.ylabel(plot_value) 193 plt.xlabel("epochs") 194 195 # save plots 196 plt.tight_layout(pad=1.0) 197 path = file_name.parent / "training.png" 198 plt.savefig(path, dpi=300) 199 print( 200 f"Loss and F1 plots are saved in {path}" 201 ) # to let user know the path of the save plots 202 plt.show(block=False) # to have the plots displayed when user run this module 203 plt.close(fig) 204 205 def plot_learning_rate( 206 self, file_name: Union[str, Path], skip_first: int = 10, skip_last: int = 5 207 ): 208 if type(file_name) is str: 209 file_name = Path(file_name) 210 211 lrs, losses = self._extract_learning_rate(file_name) 212 lrs = lrs[skip_first:-skip_last] if skip_last > 0 else lrs[skip_first:] 213 losses = losses[skip_first:-skip_last] if skip_last > 0 else losses[skip_first:] 214 215 fig, ax = plt.subplots(1, 1) 216 ax.plot(lrs, losses) 217 ax.set_ylabel("Loss") 218 ax.set_xlabel("Learning Rate") 219 ax.set_xscale("log") 220 ax.xaxis.set_major_formatter(plt.FormatStrFormatter("%.0e")) 221 222 # plt.show() 223 224 # save plot 225 plt.tight_layout(pad=1.0) 226 path = file_name.parent / "learning_rate.png" 227 plt.savefig(path, dpi=300) 228 print( 229 f"Learning_rate plots are saved in {path}" 230 ) # to let user know the path of the save plots 231 plt.show(block=True) # to have the plots displayed when user run this module 232 plt.close(fig) 233 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/flair/visual/training_curves.py b/flair/visual/training_curves.py --- a/flair/visual/training_curves.py +++ b/flair/visual/training_curves.py @@ -54,7 +54,7 @@ row.index(f"TRAIN_{score}") if f"TRAIN_{score}" in row else None ) DEV_SCORE = row.index(f"DEV_{score}") if f"DEV_{score}" in row else None - TEST_SCORE = row.index(f"TEST_{score}") + TEST_SCORE = row.index(f"TEST_{score}") if f"TEST_{score}" in row else None # then get all relevant values from the tsv for row in tsvin: @@ -69,8 +69,9 @@ if row[DEV_SCORE] != "_": training_curves["dev"]["score"].append(float(row[DEV_SCORE])) - if row[TEST_SCORE] != "_": - training_curves["test"]["score"].append(float(row[TEST_SCORE])) + if TEST_SCORE is not None: + if row[TEST_SCORE] != "_": + training_curves["test"]["score"].append(float(row[TEST_SCORE])) return training_curves
{"golden_diff": "diff --git a/flair/visual/training_curves.py b/flair/visual/training_curves.py\n--- a/flair/visual/training_curves.py\n+++ b/flair/visual/training_curves.py\n@@ -54,7 +54,7 @@\n row.index(f\"TRAIN_{score}\") if f\"TRAIN_{score}\" in row else None\n )\n DEV_SCORE = row.index(f\"DEV_{score}\") if f\"DEV_{score}\" in row else None\n- TEST_SCORE = row.index(f\"TEST_{score}\")\n+ TEST_SCORE = row.index(f\"TEST_{score}\") if f\"TEST_{score}\" in row else None\n \n # then get all relevant values from the tsv\n for row in tsvin:\n@@ -69,8 +69,9 @@\n if row[DEV_SCORE] != \"_\":\n training_curves[\"dev\"][\"score\"].append(float(row[DEV_SCORE]))\n \n- if row[TEST_SCORE] != \"_\":\n- training_curves[\"test\"][\"score\"].append(float(row[TEST_SCORE]))\n+ if TEST_SCORE is not None:\n+ if row[TEST_SCORE] != \"_\":\n+ training_curves[\"test\"][\"score\"].append(float(row[TEST_SCORE]))\n \n return training_curves\n", "issue": "Plotter throws error if test loss is not monitored\nThe Plotter method to plot training curves currently throws an error (\r\n`ValueError: 'TEST_LOSS' is not in list`) if test loss is not monitored during training. \r\n\r\nTo reproduce:\r\n\r\n```python\r\ntrainer: ModelTrainer = ModelTrainer(tagger, corpus)\r\n\r\ntrainer.train(\r\n \"resources/taggers/example-ner\",\r\n max_epochs=1,\r\n)\r\n\r\nplotter = Plotter()\r\nplotter.plot_training_curves(\"resources/taggers/example-ner/loss.tsv\")\r\n```\r\n\r\n\r\n\n", "before_files": [{"content": "import logging\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom typing import Union, List\n\nimport numpy as np\nimport csv\n\nimport math\n\nimport matplotlib.pyplot as plt\n\n\n# header for 'weights.txt'\nWEIGHT_NAME = 1\nWEIGHT_NUMBER = 2\nWEIGHT_VALUE = 3\n\nlog = logging.getLogger(\"flair\")\n\n\nclass Plotter(object):\n \"\"\"\n Plots training parameters (loss, f-score, and accuracy) and training weights over time.\n Input files are the output files 'loss.tsv' and 'weights.txt' from training either a sequence tagger or text\n classification model.\n \"\"\"\n\n @staticmethod\n def _extract_evaluation_data(file_name: Path, score: str = \"F1\") -> dict:\n training_curves = {\n \"train\": {\"loss\": [], \"score\": []},\n \"test\": {\"loss\": [], \"score\": []},\n \"dev\": {\"loss\": [], \"score\": []},\n }\n\n with open(file_name, \"r\") as tsvin:\n tsvin = csv.reader(tsvin, delimiter=\"\\t\")\n\n # determine the column index of loss, f-score and accuracy for train, dev and test split\n row = next(tsvin, None)\n\n score = score.upper()\n\n if f\"TEST_{score}\" not in row:\n log.warning(\"-\" * 100)\n log.warning(f\"WARNING: No {score} found for test split in this data.\")\n log.warning(\n f\"Are you sure you want to plot {score} and not another value?\"\n )\n log.warning(\"-\" * 100)\n\n TRAIN_SCORE = (\n row.index(f\"TRAIN_{score}\") if f\"TRAIN_{score}\" in row else None\n )\n DEV_SCORE = row.index(f\"DEV_{score}\") if f\"DEV_{score}\" in row else None\n TEST_SCORE = row.index(f\"TEST_{score}\")\n\n # then get all relevant values from the tsv\n for row in tsvin:\n\n if TRAIN_SCORE is not None:\n if row[TRAIN_SCORE] != \"_\":\n training_curves[\"train\"][\"score\"].append(\n float(row[TRAIN_SCORE])\n )\n\n if DEV_SCORE is not None:\n if row[DEV_SCORE] != \"_\":\n training_curves[\"dev\"][\"score\"].append(float(row[DEV_SCORE]))\n\n if row[TEST_SCORE] != \"_\":\n training_curves[\"test\"][\"score\"].append(float(row[TEST_SCORE]))\n\n return training_curves\n\n @staticmethod\n def _extract_weight_data(file_name: Path) -> dict:\n weights = defaultdict(lambda: defaultdict(lambda: list()))\n\n with open(file_name, \"r\") as tsvin:\n tsvin = csv.reader(tsvin, delimiter=\"\\t\")\n\n for row in tsvin:\n name = row[WEIGHT_NAME]\n param = row[WEIGHT_NUMBER]\n value = float(row[WEIGHT_VALUE])\n\n weights[name][param].append(value)\n\n return weights\n\n @staticmethod\n def _extract_learning_rate(file_name: Path):\n lrs = []\n losses = []\n\n with open(file_name, \"r\") as tsvin:\n tsvin = csv.reader(tsvin, delimiter=\"\\t\")\n row = next(tsvin, None)\n LEARNING_RATE = row.index(\"LEARNING_RATE\")\n TRAIN_LOSS = row.index(\"TRAIN_LOSS\")\n\n # then get all relevant values from the tsv\n for row in tsvin:\n if row[TRAIN_LOSS] != \"_\":\n losses.append(float(row[TRAIN_LOSS]))\n if row[LEARNING_RATE] != \"_\":\n lrs.append(float(row[LEARNING_RATE]))\n\n return lrs, losses\n\n def plot_weights(self, file_name: Union[str, Path]):\n if type(file_name) is str:\n file_name = Path(file_name)\n\n weights = self._extract_weight_data(file_name)\n\n total = len(weights)\n columns = 2\n rows = max(2, int(math.ceil(total / columns)))\n # print(rows)\n\n # figsize = (16, 16)\n if rows != columns:\n figsize = (8, rows + 0)\n\n fig = plt.figure()\n f, axarr = plt.subplots(rows, columns, figsize=figsize)\n\n c = 0\n r = 0\n for name, values in weights.items():\n # plot i\n axarr[r, c].set_title(name, fontsize=6)\n for _, v in values.items():\n axarr[r, c].plot(np.arange(0, len(v)), v, linewidth=0.35)\n axarr[r, c].set_yticks([])\n axarr[r, c].set_xticks([])\n c += 1\n if c == columns:\n c = 0\n r += 1\n\n while r != rows and c != columns:\n axarr[r, c].set_yticks([])\n axarr[r, c].set_xticks([])\n c += 1\n if c == columns:\n c = 0\n r += 1\n\n # save plots\n f.subplots_adjust(hspace=0.5)\n plt.tight_layout(pad=1.0)\n path = file_name.parent / \"weights.png\"\n plt.savefig(path, dpi=300)\n print(\n f\"Weights plots are saved in {path}\"\n ) # to let user know the path of the save plots\n plt.close(fig)\n\n def plot_training_curves(\n self, file_name: Union[str, Path], plot_values: List[str] = [\"loss\", \"F1\"]\n ):\n if type(file_name) is str:\n file_name = Path(file_name)\n\n fig = plt.figure(figsize=(15, 10))\n\n for plot_no, plot_value in enumerate(plot_values):\n\n training_curves = self._extract_evaluation_data(file_name, plot_value)\n\n plt.subplot(len(plot_values), 1, plot_no + 1)\n if training_curves[\"train\"][\"score\"]:\n x = np.arange(0, len(training_curves[\"train\"][\"score\"]))\n plt.plot(\n x, training_curves[\"train\"][\"score\"], label=f\"training {plot_value}\"\n )\n if training_curves[\"dev\"][\"score\"]:\n x = np.arange(0, len(training_curves[\"dev\"][\"score\"]))\n plt.plot(\n x, training_curves[\"dev\"][\"score\"], label=f\"validation {plot_value}\"\n )\n if training_curves[\"test\"][\"score\"]:\n x = np.arange(0, len(training_curves[\"test\"][\"score\"]))\n plt.plot(\n x, training_curves[\"test\"][\"score\"], label=f\"test {plot_value}\"\n )\n plt.legend(bbox_to_anchor=(1.04, 0), loc=\"lower left\", borderaxespad=0)\n plt.ylabel(plot_value)\n plt.xlabel(\"epochs\")\n\n # save plots\n plt.tight_layout(pad=1.0)\n path = file_name.parent / \"training.png\"\n plt.savefig(path, dpi=300)\n print(\n f\"Loss and F1 plots are saved in {path}\"\n ) # to let user know the path of the save plots\n plt.show(block=False) # to have the plots displayed when user run this module\n plt.close(fig)\n\n def plot_learning_rate(\n self, file_name: Union[str, Path], skip_first: int = 10, skip_last: int = 5\n ):\n if type(file_name) is str:\n file_name = Path(file_name)\n\n lrs, losses = self._extract_learning_rate(file_name)\n lrs = lrs[skip_first:-skip_last] if skip_last > 0 else lrs[skip_first:]\n losses = losses[skip_first:-skip_last] if skip_last > 0 else losses[skip_first:]\n\n fig, ax = plt.subplots(1, 1)\n ax.plot(lrs, losses)\n ax.set_ylabel(\"Loss\")\n ax.set_xlabel(\"Learning Rate\")\n ax.set_xscale(\"log\")\n ax.xaxis.set_major_formatter(plt.FormatStrFormatter(\"%.0e\"))\n\n # plt.show()\n\n # save plot\n plt.tight_layout(pad=1.0)\n path = file_name.parent / \"learning_rate.png\"\n plt.savefig(path, dpi=300)\n print(\n f\"Learning_rate plots are saved in {path}\"\n ) # to let user know the path of the save plots\n plt.show(block=True) # to have the plots displayed when user run this module\n plt.close(fig)\n", "path": "flair/visual/training_curves.py"}], "after_files": [{"content": "import logging\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom typing import Union, List\n\nimport numpy as np\nimport csv\n\nimport math\n\nimport matplotlib.pyplot as plt\n\n\n# header for 'weights.txt'\nWEIGHT_NAME = 1\nWEIGHT_NUMBER = 2\nWEIGHT_VALUE = 3\n\nlog = logging.getLogger(\"flair\")\n\n\nclass Plotter(object):\n \"\"\"\n Plots training parameters (loss, f-score, and accuracy) and training weights over time.\n Input files are the output files 'loss.tsv' and 'weights.txt' from training either a sequence tagger or text\n classification model.\n \"\"\"\n\n @staticmethod\n def _extract_evaluation_data(file_name: Path, score: str = \"F1\") -> dict:\n training_curves = {\n \"train\": {\"loss\": [], \"score\": []},\n \"test\": {\"loss\": [], \"score\": []},\n \"dev\": {\"loss\": [], \"score\": []},\n }\n\n with open(file_name, \"r\") as tsvin:\n tsvin = csv.reader(tsvin, delimiter=\"\\t\")\n\n # determine the column index of loss, f-score and accuracy for train, dev and test split\n row = next(tsvin, None)\n\n score = score.upper()\n\n if f\"TEST_{score}\" not in row:\n log.warning(\"-\" * 100)\n log.warning(f\"WARNING: No {score} found for test split in this data.\")\n log.warning(\n f\"Are you sure you want to plot {score} and not another value?\"\n )\n log.warning(\"-\" * 100)\n\n TRAIN_SCORE = (\n row.index(f\"TRAIN_{score}\") if f\"TRAIN_{score}\" in row else None\n )\n DEV_SCORE = row.index(f\"DEV_{score}\") if f\"DEV_{score}\" in row else None\n TEST_SCORE = row.index(f\"TEST_{score}\") if f\"TEST_{score}\" in row else None\n\n # then get all relevant values from the tsv\n for row in tsvin:\n\n if TRAIN_SCORE is not None:\n if row[TRAIN_SCORE] != \"_\":\n training_curves[\"train\"][\"score\"].append(\n float(row[TRAIN_SCORE])\n )\n\n if DEV_SCORE is not None:\n if row[DEV_SCORE] != \"_\":\n training_curves[\"dev\"][\"score\"].append(float(row[DEV_SCORE]))\n\n if TEST_SCORE is not None:\n if row[TEST_SCORE] != \"_\":\n training_curves[\"test\"][\"score\"].append(float(row[TEST_SCORE]))\n\n return training_curves\n\n @staticmethod\n def _extract_weight_data(file_name: Path) -> dict:\n weights = defaultdict(lambda: defaultdict(lambda: list()))\n\n with open(file_name, \"r\") as tsvin:\n tsvin = csv.reader(tsvin, delimiter=\"\\t\")\n\n for row in tsvin:\n name = row[WEIGHT_NAME]\n param = row[WEIGHT_NUMBER]\n value = float(row[WEIGHT_VALUE])\n\n weights[name][param].append(value)\n\n return weights\n\n @staticmethod\n def _extract_learning_rate(file_name: Path):\n lrs = []\n losses = []\n\n with open(file_name, \"r\") as tsvin:\n tsvin = csv.reader(tsvin, delimiter=\"\\t\")\n row = next(tsvin, None)\n LEARNING_RATE = row.index(\"LEARNING_RATE\")\n TRAIN_LOSS = row.index(\"TRAIN_LOSS\")\n\n # then get all relevant values from the tsv\n for row in tsvin:\n if row[TRAIN_LOSS] != \"_\":\n losses.append(float(row[TRAIN_LOSS]))\n if row[LEARNING_RATE] != \"_\":\n lrs.append(float(row[LEARNING_RATE]))\n\n return lrs, losses\n\n def plot_weights(self, file_name: Union[str, Path]):\n if type(file_name) is str:\n file_name = Path(file_name)\n\n weights = self._extract_weight_data(file_name)\n\n total = len(weights)\n columns = 2\n rows = max(2, int(math.ceil(total / columns)))\n # print(rows)\n\n # figsize = (16, 16)\n if rows != columns:\n figsize = (8, rows + 0)\n\n fig = plt.figure()\n f, axarr = plt.subplots(rows, columns, figsize=figsize)\n\n c = 0\n r = 0\n for name, values in weights.items():\n # plot i\n axarr[r, c].set_title(name, fontsize=6)\n for _, v in values.items():\n axarr[r, c].plot(np.arange(0, len(v)), v, linewidth=0.35)\n axarr[r, c].set_yticks([])\n axarr[r, c].set_xticks([])\n c += 1\n if c == columns:\n c = 0\n r += 1\n\n while r != rows and c != columns:\n axarr[r, c].set_yticks([])\n axarr[r, c].set_xticks([])\n c += 1\n if c == columns:\n c = 0\n r += 1\n\n # save plots\n f.subplots_adjust(hspace=0.5)\n plt.tight_layout(pad=1.0)\n path = file_name.parent / \"weights.png\"\n plt.savefig(path, dpi=300)\n print(\n f\"Weights plots are saved in {path}\"\n ) # to let user know the path of the save plots\n plt.close(fig)\n\n def plot_training_curves(\n self, file_name: Union[str, Path], plot_values: List[str] = [\"loss\", \"F1\"]\n ):\n if type(file_name) is str:\n file_name = Path(file_name)\n\n fig = plt.figure(figsize=(15, 10))\n\n for plot_no, plot_value in enumerate(plot_values):\n\n training_curves = self._extract_evaluation_data(file_name, plot_value)\n\n plt.subplot(len(plot_values), 1, plot_no + 1)\n if training_curves[\"train\"][\"score\"]:\n x = np.arange(0, len(training_curves[\"train\"][\"score\"]))\n plt.plot(\n x, training_curves[\"train\"][\"score\"], label=f\"training {plot_value}\"\n )\n if training_curves[\"dev\"][\"score\"]:\n x = np.arange(0, len(training_curves[\"dev\"][\"score\"]))\n plt.plot(\n x, training_curves[\"dev\"][\"score\"], label=f\"validation {plot_value}\"\n )\n if training_curves[\"test\"][\"score\"]:\n x = np.arange(0, len(training_curves[\"test\"][\"score\"]))\n plt.plot(\n x, training_curves[\"test\"][\"score\"], label=f\"test {plot_value}\"\n )\n plt.legend(bbox_to_anchor=(1.04, 0), loc=\"lower left\", borderaxespad=0)\n plt.ylabel(plot_value)\n plt.xlabel(\"epochs\")\n\n # save plots\n plt.tight_layout(pad=1.0)\n path = file_name.parent / \"training.png\"\n plt.savefig(path, dpi=300)\n print(\n f\"Loss and F1 plots are saved in {path}\"\n ) # to let user know the path of the save plots\n plt.show(block=False) # to have the plots displayed when user run this module\n plt.close(fig)\n\n def plot_learning_rate(\n self, file_name: Union[str, Path], skip_first: int = 10, skip_last: int = 5\n ):\n if type(file_name) is str:\n file_name = Path(file_name)\n\n lrs, losses = self._extract_learning_rate(file_name)\n lrs = lrs[skip_first:-skip_last] if skip_last > 0 else lrs[skip_first:]\n losses = losses[skip_first:-skip_last] if skip_last > 0 else losses[skip_first:]\n\n fig, ax = plt.subplots(1, 1)\n ax.plot(lrs, losses)\n ax.set_ylabel(\"Loss\")\n ax.set_xlabel(\"Learning Rate\")\n ax.set_xscale(\"log\")\n ax.xaxis.set_major_formatter(plt.FormatStrFormatter(\"%.0e\"))\n\n # plt.show()\n\n # save plot\n plt.tight_layout(pad=1.0)\n path = file_name.parent / \"learning_rate.png\"\n plt.savefig(path, dpi=300)\n print(\n f\"Learning_rate plots are saved in {path}\"\n ) # to let user know the path of the save plots\n plt.show(block=True) # to have the plots displayed when user run this module\n plt.close(fig)\n", "path": "flair/visual/training_curves.py"}]}
2,863
276
gh_patches_debug_37377
rasdani/github-patches
git_diff
sopel-irc__sopel-931
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [config] FilenameAttribute throws exceptions when configuring https://github.com/sopel-irc/sopel/blob/master/sopel/config/types.py#L314-L340 Serialize and parse require extra arguments that they are not passed during `sopel --configure-modules` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sopel/config/types.py` Content: ``` 1 # coding=utf8 2 """Types for creating section definitions. 3 4 A section definition consists of a subclass of ``StaticSection``, on which any 5 number of subclasses of ``BaseValidated`` (a few common ones of which are 6 available in this module) are assigned as attributes. These descriptors define 7 how to read values from, and write values to, the config file. 8 9 As an example, if one wanted to define the ``[spam]`` section as having an 10 ``eggs`` option, which contains a list of values, they could do this: 11 12 >>> class SpamSection(StaticSection): 13 ... eggs = ListAttribute('eggs') 14 ... 15 >>> SpamSection(config, 'spam') 16 >>> print(config.spam.eggs) 17 [] 18 >>> config.spam.eggs = ['goose', 'turkey', 'duck', 'chicken', 'quail'] 19 >>> print(config.spam.eggs) 20 ['goose', 'turkey', 'duck', 'chicken', 'quail'] 21 >>> config.spam.eggs = 'herring' 22 Traceback (most recent call last): 23 ... 24 ValueError: ListAttribute value must be a list. 25 """ 26 27 from __future__ import unicode_literals 28 import os.path 29 import sys 30 from sopel.tools import get_input 31 32 try: 33 import configparser 34 except ImportError: 35 import ConfigParser as configparser 36 37 if sys.version_info.major >= 3: 38 unicode = str 39 basestring = (str, bytes) 40 41 42 class NO_DEFAULT(object): 43 """A special value to indicate that there should be no default.""" 44 45 46 class StaticSection(object): 47 """A configuration section with parsed and validated settings. 48 49 This class is intended to be subclassed with added ``ValidatedAttribute``s. 50 """ 51 def __init__(self, config, section_name, validate=True): 52 if not config.parser.has_section(section_name): 53 config.parser.add_section(section_name) 54 self._parent = config 55 self._parser = config.parser 56 self._section_name = section_name 57 for value in dir(self): 58 try: 59 getattr(self, value) 60 except ValueError as e: 61 raise ValueError( 62 'Invalid value for {}.{}: {}'.format(section_name, value, 63 e.message) 64 ) 65 except AttributeError: 66 if validate: 67 raise ValueError( 68 'Missing required value for {}.{}'.format(section_name, 69 value) 70 ) 71 72 def configure_setting(self, name, prompt, default=NO_DEFAULT): 73 """Return a validated value for this attribute from the terminal. 74 75 ``prompt`` will be the docstring of the attribute if not given. 76 77 If ``default`` is passed, it will be used if no value is given by the 78 user. If it is not passed, the current value of the setting, or the 79 default value if it's unset, will be used. Note that if ``default`` is 80 passed, the current value of the setting will be ignored, even if it is 81 not the attribute's default. 82 """ 83 clazz = getattr(self.__class__, name) 84 if default is NO_DEFAULT: 85 try: 86 default = getattr(self, name) 87 except AttributeError: 88 pass 89 except ValueError: 90 print('The configured value for this option was invalid.') 91 if clazz.default is not NO_DEFAULT: 92 default = clazz.default 93 while True: 94 try: 95 value = clazz.configure(prompt, default) 96 except ValueError as exc: 97 print(exc) 98 else: 99 break 100 setattr(self, name, value) 101 102 103 class BaseValidated(object): 104 """The base type for a descriptor in a ``StaticSection``.""" 105 def __init__(self, name, default=None): 106 """ 107 ``name`` is the name of the setting in the section. 108 ``default`` is the value to be returned if the setting is not set. If 109 not given, AttributeError will be raised instead. 110 """ 111 self.name = name 112 self.default = default 113 114 def configure(self, prompt, default): 115 """With the prompt and default, parse and return a value from terminal. 116 """ 117 if default is not NO_DEFAULT and default is not None: 118 prompt = '{} [{}]'.format(prompt, default) 119 value = get_input(prompt + ' ') 120 if not value and default is NO_DEFAULT: 121 raise ValueError("You must provide a value for this option.") 122 value = value or default 123 return self.parse(value) 124 125 def serialize(self, value): 126 """Take some object, and return the string to be saved to the file. 127 128 Must be implemented in subclasses. 129 """ 130 raise NotImplemented("Serialize method must be implemented in subclass") 131 132 def parse(self, value): 133 """Take a string from the file, and return the appropriate object. 134 135 Must be implemented in subclasses.""" 136 raise NotImplemented("Parse method must be implemented in subclass") 137 138 def __get__(self, instance, owner=None): 139 if instance is None: 140 # If instance is None, we're getting from a section class, not an 141 # instance of a session class. It makes the wizard code simpler 142 # (and is really just more intuitive) to return the descriptor 143 # instance here. 144 return self 145 146 try: 147 value = instance._parser.get(instance._section_name, self.name) 148 except configparser.NoOptionError: 149 if self.default is not NO_DEFAULT: 150 return self.default 151 raise AttributeError( 152 "Missing required value for {}.{}".format( 153 instance._section_name, self.name 154 ) 155 ) 156 return self.parse(value) 157 158 def __set__(self, instance, value): 159 if value is None: 160 instance._parser.remove_option(instance._section_name, self.name) 161 return 162 value = self.serialize(value) 163 instance._parser.set(instance._section_name, self.name, value) 164 165 def __delete__(self, instance): 166 instance._parser.remove_option(instance._section_name, self.name) 167 168 169 def _parse_boolean(value): 170 if value is True or value == 1: 171 return value 172 if isinstance(value, basestring): 173 return value.lower() in ['1', 'yes', 'y', 'true', 'on'] 174 return bool(value) 175 176 177 def _serialize_boolean(value): 178 return 'true' if _parse_boolean(value) else 'false' 179 180 181 class ValidatedAttribute(BaseValidated): 182 def __init__(self, name, parse=None, serialize=None, default=None): 183 """A descriptor for settings in a ``StaticSection`` 184 185 ``parse`` is the function to be used to read the string and create the 186 appropriate object. If not given, return the string as-is. 187 ``serialize`` takes an object, and returns the value to be written to 188 the file. If not given, defaults to ``unicode``. 189 """ 190 self.name = name 191 if parse == bool: 192 parse = _parse_boolean 193 if not serialize or serialize == bool: 194 serialize = _serialize_boolean 195 self.parse = parse or self.parse 196 self.serialize = serialize or self.serialize 197 self.default = default 198 199 def serialize(self, value): 200 return unicode(value) 201 202 def parse(self, value): 203 return value 204 205 def configure(self, prompt, default): 206 if self.parse == _parse_boolean: 207 prompt += ' (y/n)' 208 default = 'y' if default else 'n' 209 return super(ValidatedAttribute, self).configure(prompt, default) 210 211 212 class ListAttribute(BaseValidated): 213 """A config attribute containing a list of string values. 214 215 Values are saved to the file as a comma-separated list. It does not 216 currently support commas within items in the list. By default, the spaces 217 before and after each item are stripped; you can override this by passing 218 ``strip=False``.""" 219 def __init__(self, name, strip=True, default=None): 220 default = default or [] 221 super(ListAttribute, self).__init__(name, default=default) 222 self.strip = strip 223 224 def parse(self, value): 225 value = value.split(',') 226 if self.strip: 227 return [v.strip() for v in value] 228 else: 229 return value 230 231 def serialize(self, value): 232 if not isinstance(value, (list, set)): 233 raise ValueError('ListAttribute value must be a list.') 234 return ','.join(value) 235 236 def configure(self, prompt, default): 237 each_prompt = '?' 238 if isinstance(prompt, tuple): 239 each_prompt = prompt[1] 240 prompt = prompt[0] 241 242 if default is not NO_DEFAULT: 243 default = ','.join(default) 244 prompt = '{} [{}]'.format(prompt, default) 245 else: 246 default = '' 247 print(prompt) 248 values = [] 249 value = get_input(each_prompt + ' ') or default 250 while value: 251 values.append(value) 252 value = get_input(each_prompt + ' ') 253 return self.parse(','.join(values)) 254 255 256 class ChoiceAttribute(BaseValidated): 257 """A config attribute which must be one of a set group of options. 258 259 Currently, the choices can only be strings.""" 260 def __init__(self, name, choices, default=None): 261 super(ChoiceAttribute, self).__init__(name, default=default) 262 self.choices = choices 263 264 def parse(self, value): 265 if value in self.choices: 266 return value 267 else: 268 raise ValueError('Value must be in {}'.format(self.choices)) 269 270 def serialize(self, value): 271 if value in self.choices: 272 return value 273 else: 274 raise ValueError('Value must be in {}'.format(self.choices)) 275 276 277 class FilenameAttribute(BaseValidated): 278 """A config attribute which must be a file or directory.""" 279 def __init__(self, name, relative=True, directory=False, default=None): 280 """ 281 ``relative`` is whether the path should be relative to the location 282 of the config file (absolute paths will still be absolute). If 283 ``directory`` is True, the path must indicate a directory, rather than 284 a file. 285 """ 286 super(FilenameAttribute, self).__init__(name, default=default) 287 self.relative = relative 288 self.directory = directory 289 290 def __get__(self, instance, owner=None): 291 if instance is None: 292 return self 293 try: 294 value = instance._parser.get(instance._section_name, self.name) 295 except configparser.NoOptionError: 296 if self.default is not NO_DEFAULT: 297 value = self.default 298 else: 299 raise AttributeError( 300 "Missing required value for {}.{}".format( 301 instance._section_name, self.name 302 ) 303 ) 304 main_config = instance._parent 305 this_section = getattr(main_config, instance._section_name) 306 return self.parse(main_config, this_section, value) 307 308 def __set__(self, instance, value): 309 main_config = instance._parent 310 this_section = getattr(main_config, instance._section_name) 311 value = self.serialize(main_config, this_section, value) 312 instance._parser.set(instance._section_name, self.name, value) 313 314 def parse(self, main_config, this_section, value): 315 if value is None: 316 return 317 318 value = os.path.expanduser(value) 319 320 if not os.path.isabs(value): 321 if not self.relative: 322 raise ValueError("Value must be an absolute path.") 323 value = os.path.join(main_config.homedir, value) 324 325 if self.directory and not os.path.isdir(value): 326 try: 327 os.makedirs(value) 328 except OSError: 329 raise ValueError( 330 "Value must be an existing or creatable directory.") 331 if not self.directory and not os.path.isfile(value): 332 try: 333 open(value, 'w').close() 334 except OSError: 335 raise ValueError("Value must be an existant or creatable file.") 336 return value 337 338 def serialize(self, main_config, this_section, value): 339 self.parse(main_config, this_section, value) 340 return value # So that it's still relative 341 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sopel/config/types.py b/sopel/config/types.py --- a/sopel/config/types.py +++ b/sopel/config/types.py @@ -92,7 +92,7 @@ default = clazz.default while True: try: - value = clazz.configure(prompt, default) + value = clazz.configure(prompt, default, self._parent, self._section_name) except ValueError as exc: print(exc) else: @@ -111,7 +111,7 @@ self.name = name self.default = default - def configure(self, prompt, default): + def configure(self, prompt, default, parent, section_name): """With the prompt and default, parse and return a value from terminal. """ if default is not NO_DEFAULT and default is not None: @@ -202,7 +202,7 @@ def parse(self, value): return value - def configure(self, prompt, default): + def configure(self, prompt, default, parent, section_name): if self.parse == _parse_boolean: prompt += ' (y/n)' default = 'y' if default else 'n' @@ -233,7 +233,7 @@ raise ValueError('ListAttribute value must be a list.') return ','.join(value) - def configure(self, prompt, default): + def configure(self, prompt, default, parent, section_name): each_prompt = '?' if isinstance(prompt, tuple): each_prompt = prompt[1] @@ -311,6 +311,17 @@ value = self.serialize(main_config, this_section, value) instance._parser.set(instance._section_name, self.name, value) + def configure(self, prompt, default, parent, section_name): + """With the prompt and default, parse and return a value from terminal. + """ + if default is not NO_DEFAULT and default is not None: + prompt = '{} [{}]'.format(prompt, default) + value = get_input(prompt + ' ') + if not value and default is NO_DEFAULT: + raise ValueError("You must provide a value for this option.") + value = value or default + return self.parse(parent, section_name, value) + def parse(self, main_config, this_section, value): if value is None: return
{"golden_diff": "diff --git a/sopel/config/types.py b/sopel/config/types.py\n--- a/sopel/config/types.py\n+++ b/sopel/config/types.py\n@@ -92,7 +92,7 @@\n default = clazz.default\n while True:\n try:\n- value = clazz.configure(prompt, default)\n+ value = clazz.configure(prompt, default, self._parent, self._section_name)\n except ValueError as exc:\n print(exc)\n else:\n@@ -111,7 +111,7 @@\n self.name = name\n self.default = default\n \n- def configure(self, prompt, default):\n+ def configure(self, prompt, default, parent, section_name):\n \"\"\"With the prompt and default, parse and return a value from terminal.\n \"\"\"\n if default is not NO_DEFAULT and default is not None:\n@@ -202,7 +202,7 @@\n def parse(self, value):\n return value\n \n- def configure(self, prompt, default):\n+ def configure(self, prompt, default, parent, section_name):\n if self.parse == _parse_boolean:\n prompt += ' (y/n)'\n default = 'y' if default else 'n'\n@@ -233,7 +233,7 @@\n raise ValueError('ListAttribute value must be a list.')\n return ','.join(value)\n \n- def configure(self, prompt, default):\n+ def configure(self, prompt, default, parent, section_name):\n each_prompt = '?'\n if isinstance(prompt, tuple):\n each_prompt = prompt[1]\n@@ -311,6 +311,17 @@\n value = self.serialize(main_config, this_section, value)\n instance._parser.set(instance._section_name, self.name, value)\n \n+ def configure(self, prompt, default, parent, section_name):\n+ \"\"\"With the prompt and default, parse and return a value from terminal.\n+ \"\"\"\n+ if default is not NO_DEFAULT and default is not None:\n+ prompt = '{} [{}]'.format(prompt, default)\n+ value = get_input(prompt + ' ')\n+ if not value and default is NO_DEFAULT:\n+ raise ValueError(\"You must provide a value for this option.\")\n+ value = value or default\n+ return self.parse(parent, section_name, value)\n+\n def parse(self, main_config, this_section, value):\n if value is None:\n return\n", "issue": "[config] FilenameAttribute throws exceptions when configuring\nhttps://github.com/sopel-irc/sopel/blob/master/sopel/config/types.py#L314-L340\n\nSerialize and parse require extra arguments that they are not passed during `sopel --configure-modules`\n\n", "before_files": [{"content": "# coding=utf8\n\"\"\"Types for creating section definitions.\n\nA section definition consists of a subclass of ``StaticSection``, on which any\nnumber of subclasses of ``BaseValidated`` (a few common ones of which are\navailable in this module) are assigned as attributes. These descriptors define\nhow to read values from, and write values to, the config file.\n\nAs an example, if one wanted to define the ``[spam]`` section as having an\n``eggs`` option, which contains a list of values, they could do this:\n\n >>> class SpamSection(StaticSection):\n ... eggs = ListAttribute('eggs')\n ...\n >>> SpamSection(config, 'spam')\n >>> print(config.spam.eggs)\n []\n >>> config.spam.eggs = ['goose', 'turkey', 'duck', 'chicken', 'quail']\n >>> print(config.spam.eggs)\n ['goose', 'turkey', 'duck', 'chicken', 'quail']\n >>> config.spam.eggs = 'herring'\n Traceback (most recent call last):\n ...\n ValueError: ListAttribute value must be a list.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport os.path\nimport sys\nfrom sopel.tools import get_input\n\ntry:\n import configparser\nexcept ImportError:\n import ConfigParser as configparser\n\nif sys.version_info.major >= 3:\n unicode = str\n basestring = (str, bytes)\n\n\nclass NO_DEFAULT(object):\n \"\"\"A special value to indicate that there should be no default.\"\"\"\n\n\nclass StaticSection(object):\n \"\"\"A configuration section with parsed and validated settings.\n\n This class is intended to be subclassed with added ``ValidatedAttribute``s.\n \"\"\"\n def __init__(self, config, section_name, validate=True):\n if not config.parser.has_section(section_name):\n config.parser.add_section(section_name)\n self._parent = config\n self._parser = config.parser\n self._section_name = section_name\n for value in dir(self):\n try:\n getattr(self, value)\n except ValueError as e:\n raise ValueError(\n 'Invalid value for {}.{}: {}'.format(section_name, value,\n e.message)\n )\n except AttributeError:\n if validate:\n raise ValueError(\n 'Missing required value for {}.{}'.format(section_name,\n value)\n )\n\n def configure_setting(self, name, prompt, default=NO_DEFAULT):\n \"\"\"Return a validated value for this attribute from the terminal.\n\n ``prompt`` will be the docstring of the attribute if not given.\n\n If ``default`` is passed, it will be used if no value is given by the\n user. If it is not passed, the current value of the setting, or the\n default value if it's unset, will be used. Note that if ``default`` is\n passed, the current value of the setting will be ignored, even if it is\n not the attribute's default.\n \"\"\"\n clazz = getattr(self.__class__, name)\n if default is NO_DEFAULT:\n try:\n default = getattr(self, name)\n except AttributeError:\n pass\n except ValueError:\n print('The configured value for this option was invalid.')\n if clazz.default is not NO_DEFAULT:\n default = clazz.default\n while True:\n try:\n value = clazz.configure(prompt, default)\n except ValueError as exc:\n print(exc)\n else:\n break\n setattr(self, name, value)\n\n\nclass BaseValidated(object):\n \"\"\"The base type for a descriptor in a ``StaticSection``.\"\"\"\n def __init__(self, name, default=None):\n \"\"\"\n ``name`` is the name of the setting in the section.\n ``default`` is the value to be returned if the setting is not set. If\n not given, AttributeError will be raised instead.\n \"\"\"\n self.name = name\n self.default = default\n\n def configure(self, prompt, default):\n \"\"\"With the prompt and default, parse and return a value from terminal.\n \"\"\"\n if default is not NO_DEFAULT and default is not None:\n prompt = '{} [{}]'.format(prompt, default)\n value = get_input(prompt + ' ')\n if not value and default is NO_DEFAULT:\n raise ValueError(\"You must provide a value for this option.\")\n value = value or default\n return self.parse(value)\n\n def serialize(self, value):\n \"\"\"Take some object, and return the string to be saved to the file.\n\n Must be implemented in subclasses.\n \"\"\"\n raise NotImplemented(\"Serialize method must be implemented in subclass\")\n\n def parse(self, value):\n \"\"\"Take a string from the file, and return the appropriate object.\n\n Must be implemented in subclasses.\"\"\"\n raise NotImplemented(\"Parse method must be implemented in subclass\")\n\n def __get__(self, instance, owner=None):\n if instance is None:\n # If instance is None, we're getting from a section class, not an\n # instance of a session class. It makes the wizard code simpler\n # (and is really just more intuitive) to return the descriptor\n # instance here.\n return self\n\n try:\n value = instance._parser.get(instance._section_name, self.name)\n except configparser.NoOptionError:\n if self.default is not NO_DEFAULT:\n return self.default\n raise AttributeError(\n \"Missing required value for {}.{}\".format(\n instance._section_name, self.name\n )\n )\n return self.parse(value)\n\n def __set__(self, instance, value):\n if value is None:\n instance._parser.remove_option(instance._section_name, self.name)\n return\n value = self.serialize(value)\n instance._parser.set(instance._section_name, self.name, value)\n\n def __delete__(self, instance):\n instance._parser.remove_option(instance._section_name, self.name)\n\n\ndef _parse_boolean(value):\n if value is True or value == 1:\n return value\n if isinstance(value, basestring):\n return value.lower() in ['1', 'yes', 'y', 'true', 'on']\n return bool(value)\n\n\ndef _serialize_boolean(value):\n return 'true' if _parse_boolean(value) else 'false'\n\n\nclass ValidatedAttribute(BaseValidated):\n def __init__(self, name, parse=None, serialize=None, default=None):\n \"\"\"A descriptor for settings in a ``StaticSection``\n\n ``parse`` is the function to be used to read the string and create the\n appropriate object. If not given, return the string as-is.\n ``serialize`` takes an object, and returns the value to be written to\n the file. If not given, defaults to ``unicode``.\n \"\"\"\n self.name = name\n if parse == bool:\n parse = _parse_boolean\n if not serialize or serialize == bool:\n serialize = _serialize_boolean\n self.parse = parse or self.parse\n self.serialize = serialize or self.serialize\n self.default = default\n\n def serialize(self, value):\n return unicode(value)\n\n def parse(self, value):\n return value\n\n def configure(self, prompt, default):\n if self.parse == _parse_boolean:\n prompt += ' (y/n)'\n default = 'y' if default else 'n'\n return super(ValidatedAttribute, self).configure(prompt, default)\n\n\nclass ListAttribute(BaseValidated):\n \"\"\"A config attribute containing a list of string values.\n\n Values are saved to the file as a comma-separated list. It does not\n currently support commas within items in the list. By default, the spaces\n before and after each item are stripped; you can override this by passing\n ``strip=False``.\"\"\"\n def __init__(self, name, strip=True, default=None):\n default = default or []\n super(ListAttribute, self).__init__(name, default=default)\n self.strip = strip\n\n def parse(self, value):\n value = value.split(',')\n if self.strip:\n return [v.strip() for v in value]\n else:\n return value\n\n def serialize(self, value):\n if not isinstance(value, (list, set)):\n raise ValueError('ListAttribute value must be a list.')\n return ','.join(value)\n\n def configure(self, prompt, default):\n each_prompt = '?'\n if isinstance(prompt, tuple):\n each_prompt = prompt[1]\n prompt = prompt[0]\n\n if default is not NO_DEFAULT:\n default = ','.join(default)\n prompt = '{} [{}]'.format(prompt, default)\n else:\n default = ''\n print(prompt)\n values = []\n value = get_input(each_prompt + ' ') or default\n while value:\n values.append(value)\n value = get_input(each_prompt + ' ')\n return self.parse(','.join(values))\n\n\nclass ChoiceAttribute(BaseValidated):\n \"\"\"A config attribute which must be one of a set group of options.\n\n Currently, the choices can only be strings.\"\"\"\n def __init__(self, name, choices, default=None):\n super(ChoiceAttribute, self).__init__(name, default=default)\n self.choices = choices\n\n def parse(self, value):\n if value in self.choices:\n return value\n else:\n raise ValueError('Value must be in {}'.format(self.choices))\n\n def serialize(self, value):\n if value in self.choices:\n return value\n else:\n raise ValueError('Value must be in {}'.format(self.choices))\n\n\nclass FilenameAttribute(BaseValidated):\n \"\"\"A config attribute which must be a file or directory.\"\"\"\n def __init__(self, name, relative=True, directory=False, default=None):\n \"\"\"\n ``relative`` is whether the path should be relative to the location\n of the config file (absolute paths will still be absolute). If\n ``directory`` is True, the path must indicate a directory, rather than\n a file.\n \"\"\"\n super(FilenameAttribute, self).__init__(name, default=default)\n self.relative = relative\n self.directory = directory\n\n def __get__(self, instance, owner=None):\n if instance is None:\n return self\n try:\n value = instance._parser.get(instance._section_name, self.name)\n except configparser.NoOptionError:\n if self.default is not NO_DEFAULT:\n value = self.default\n else:\n raise AttributeError(\n \"Missing required value for {}.{}\".format(\n instance._section_name, self.name\n )\n )\n main_config = instance._parent\n this_section = getattr(main_config, instance._section_name)\n return self.parse(main_config, this_section, value)\n\n def __set__(self, instance, value):\n main_config = instance._parent\n this_section = getattr(main_config, instance._section_name)\n value = self.serialize(main_config, this_section, value)\n instance._parser.set(instance._section_name, self.name, value)\n\n def parse(self, main_config, this_section, value):\n if value is None:\n return\n\n value = os.path.expanduser(value)\n\n if not os.path.isabs(value):\n if not self.relative:\n raise ValueError(\"Value must be an absolute path.\")\n value = os.path.join(main_config.homedir, value)\n\n if self.directory and not os.path.isdir(value):\n try:\n os.makedirs(value)\n except OSError:\n raise ValueError(\n \"Value must be an existing or creatable directory.\")\n if not self.directory and not os.path.isfile(value):\n try:\n open(value, 'w').close()\n except OSError:\n raise ValueError(\"Value must be an existant or creatable file.\")\n return value\n\n def serialize(self, main_config, this_section, value):\n self.parse(main_config, this_section, value)\n return value # So that it's still relative\n", "path": "sopel/config/types.py"}], "after_files": [{"content": "# coding=utf8\n\"\"\"Types for creating section definitions.\n\nA section definition consists of a subclass of ``StaticSection``, on which any\nnumber of subclasses of ``BaseValidated`` (a few common ones of which are\navailable in this module) are assigned as attributes. These descriptors define\nhow to read values from, and write values to, the config file.\n\nAs an example, if one wanted to define the ``[spam]`` section as having an\n``eggs`` option, which contains a list of values, they could do this:\n\n >>> class SpamSection(StaticSection):\n ... eggs = ListAttribute('eggs')\n ...\n >>> SpamSection(config, 'spam')\n >>> print(config.spam.eggs)\n []\n >>> config.spam.eggs = ['goose', 'turkey', 'duck', 'chicken', 'quail']\n >>> print(config.spam.eggs)\n ['goose', 'turkey', 'duck', 'chicken', 'quail']\n >>> config.spam.eggs = 'herring'\n Traceback (most recent call last):\n ...\n ValueError: ListAttribute value must be a list.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport os.path\nimport sys\nfrom sopel.tools import get_input\n\ntry:\n import configparser\nexcept ImportError:\n import ConfigParser as configparser\n\nif sys.version_info.major >= 3:\n unicode = str\n basestring = (str, bytes)\n\n\nclass NO_DEFAULT(object):\n \"\"\"A special value to indicate that there should be no default.\"\"\"\n\n\nclass StaticSection(object):\n \"\"\"A configuration section with parsed and validated settings.\n\n This class is intended to be subclassed with added ``ValidatedAttribute``s.\n \"\"\"\n def __init__(self, config, section_name, validate=True):\n if not config.parser.has_section(section_name):\n config.parser.add_section(section_name)\n self._parent = config\n self._parser = config.parser\n self._section_name = section_name\n for value in dir(self):\n try:\n getattr(self, value)\n except ValueError as e:\n raise ValueError(\n 'Invalid value for {}.{}: {}'.format(section_name, value,\n e.message)\n )\n except AttributeError:\n if validate:\n raise ValueError(\n 'Missing required value for {}.{}'.format(section_name,\n value)\n )\n\n def configure_setting(self, name, prompt, default=NO_DEFAULT):\n \"\"\"Return a validated value for this attribute from the terminal.\n\n ``prompt`` will be the docstring of the attribute if not given.\n\n If ``default`` is passed, it will be used if no value is given by the\n user. If it is not passed, the current value of the setting, or the\n default value if it's unset, will be used. Note that if ``default`` is\n passed, the current value of the setting will be ignored, even if it is\n not the attribute's default.\n \"\"\"\n clazz = getattr(self.__class__, name)\n if default is NO_DEFAULT:\n try:\n default = getattr(self, name)\n except AttributeError:\n pass\n except ValueError:\n print('The configured value for this option was invalid.')\n if clazz.default is not NO_DEFAULT:\n default = clazz.default\n while True:\n try:\n value = clazz.configure(prompt, default, self._parent, self._section_name)\n except ValueError as exc:\n print(exc)\n else:\n break\n setattr(self, name, value)\n\n\nclass BaseValidated(object):\n \"\"\"The base type for a descriptor in a ``StaticSection``.\"\"\"\n def __init__(self, name, default=None):\n \"\"\"\n ``name`` is the name of the setting in the section.\n ``default`` is the value to be returned if the setting is not set. If\n not given, AttributeError will be raised instead.\n \"\"\"\n self.name = name\n self.default = default\n\n def configure(self, prompt, default, parent, section_name):\n \"\"\"With the prompt and default, parse and return a value from terminal.\n \"\"\"\n if default is not NO_DEFAULT and default is not None:\n prompt = '{} [{}]'.format(prompt, default)\n value = get_input(prompt + ' ')\n if not value and default is NO_DEFAULT:\n raise ValueError(\"You must provide a value for this option.\")\n value = value or default\n return self.parse(value)\n\n def serialize(self, value):\n \"\"\"Take some object, and return the string to be saved to the file.\n\n Must be implemented in subclasses.\n \"\"\"\n raise NotImplemented(\"Serialize method must be implemented in subclass\")\n\n def parse(self, value):\n \"\"\"Take a string from the file, and return the appropriate object.\n\n Must be implemented in subclasses.\"\"\"\n raise NotImplemented(\"Parse method must be implemented in subclass\")\n\n def __get__(self, instance, owner=None):\n if instance is None:\n # If instance is None, we're getting from a section class, not an\n # instance of a session class. It makes the wizard code simpler\n # (and is really just more intuitive) to return the descriptor\n # instance here.\n return self\n\n try:\n value = instance._parser.get(instance._section_name, self.name)\n except configparser.NoOptionError:\n if self.default is not NO_DEFAULT:\n return self.default\n raise AttributeError(\n \"Missing required value for {}.{}\".format(\n instance._section_name, self.name\n )\n )\n return self.parse(value)\n\n def __set__(self, instance, value):\n if value is None:\n instance._parser.remove_option(instance._section_name, self.name)\n return\n value = self.serialize(value)\n instance._parser.set(instance._section_name, self.name, value)\n\n def __delete__(self, instance):\n instance._parser.remove_option(instance._section_name, self.name)\n\n\ndef _parse_boolean(value):\n if value is True or value == 1:\n return value\n if isinstance(value, basestring):\n return value.lower() in ['1', 'yes', 'y', 'true', 'on']\n return bool(value)\n\n\ndef _serialize_boolean(value):\n return 'true' if _parse_boolean(value) else 'false'\n\n\nclass ValidatedAttribute(BaseValidated):\n def __init__(self, name, parse=None, serialize=None, default=None):\n \"\"\"A descriptor for settings in a ``StaticSection``\n\n ``parse`` is the function to be used to read the string and create the\n appropriate object. If not given, return the string as-is.\n ``serialize`` takes an object, and returns the value to be written to\n the file. If not given, defaults to ``unicode``.\n \"\"\"\n self.name = name\n if parse == bool:\n parse = _parse_boolean\n if not serialize or serialize == bool:\n serialize = _serialize_boolean\n self.parse = parse or self.parse\n self.serialize = serialize or self.serialize\n self.default = default\n\n def serialize(self, value):\n return unicode(value)\n\n def parse(self, value):\n return value\n\n def configure(self, prompt, default, parent, section_name):\n if self.parse == _parse_boolean:\n prompt += ' (y/n)'\n default = 'y' if default else 'n'\n return super(ValidatedAttribute, self).configure(prompt, default)\n\n\nclass ListAttribute(BaseValidated):\n \"\"\"A config attribute containing a list of string values.\n\n Values are saved to the file as a comma-separated list. It does not\n currently support commas within items in the list. By default, the spaces\n before and after each item are stripped; you can override this by passing\n ``strip=False``.\"\"\"\n def __init__(self, name, strip=True, default=None):\n default = default or []\n super(ListAttribute, self).__init__(name, default=default)\n self.strip = strip\n\n def parse(self, value):\n value = value.split(',')\n if self.strip:\n return [v.strip() for v in value]\n else:\n return value\n\n def serialize(self, value):\n if not isinstance(value, (list, set)):\n raise ValueError('ListAttribute value must be a list.')\n return ','.join(value)\n\n def configure(self, prompt, default, parent, section_name):\n each_prompt = '?'\n if isinstance(prompt, tuple):\n each_prompt = prompt[1]\n prompt = prompt[0]\n\n if default is not NO_DEFAULT:\n default = ','.join(default)\n prompt = '{} [{}]'.format(prompt, default)\n else:\n default = ''\n print(prompt)\n values = []\n value = get_input(each_prompt + ' ') or default\n while value:\n values.append(value)\n value = get_input(each_prompt + ' ')\n return self.parse(','.join(values))\n\n\nclass ChoiceAttribute(BaseValidated):\n \"\"\"A config attribute which must be one of a set group of options.\n\n Currently, the choices can only be strings.\"\"\"\n def __init__(self, name, choices, default=None):\n super(ChoiceAttribute, self).__init__(name, default=default)\n self.choices = choices\n\n def parse(self, value):\n if value in self.choices:\n return value\n else:\n raise ValueError('Value must be in {}'.format(self.choices))\n\n def serialize(self, value):\n if value in self.choices:\n return value\n else:\n raise ValueError('Value must be in {}'.format(self.choices))\n\n\nclass FilenameAttribute(BaseValidated):\n \"\"\"A config attribute which must be a file or directory.\"\"\"\n def __init__(self, name, relative=True, directory=False, default=None):\n \"\"\"\n ``relative`` is whether the path should be relative to the location\n of the config file (absolute paths will still be absolute). If\n ``directory`` is True, the path must indicate a directory, rather than\n a file.\n \"\"\"\n super(FilenameAttribute, self).__init__(name, default=default)\n self.relative = relative\n self.directory = directory\n\n def __get__(self, instance, owner=None):\n if instance is None:\n return self\n try:\n value = instance._parser.get(instance._section_name, self.name)\n except configparser.NoOptionError:\n if self.default is not NO_DEFAULT:\n value = self.default\n else:\n raise AttributeError(\n \"Missing required value for {}.{}\".format(\n instance._section_name, self.name\n )\n )\n main_config = instance._parent\n this_section = getattr(main_config, instance._section_name)\n return self.parse(main_config, this_section, value)\n\n def __set__(self, instance, value):\n main_config = instance._parent\n this_section = getattr(main_config, instance._section_name)\n value = self.serialize(main_config, this_section, value)\n instance._parser.set(instance._section_name, self.name, value)\n\n def configure(self, prompt, default, parent, section_name):\n \"\"\"With the prompt and default, parse and return a value from terminal.\n \"\"\"\n if default is not NO_DEFAULT and default is not None:\n prompt = '{} [{}]'.format(prompt, default)\n value = get_input(prompt + ' ')\n if not value and default is NO_DEFAULT:\n raise ValueError(\"You must provide a value for this option.\")\n value = value or default\n return self.parse(parent, section_name, value)\n\n def parse(self, main_config, this_section, value):\n if value is None:\n return\n\n value = os.path.expanduser(value)\n\n if not os.path.isabs(value):\n if not self.relative:\n raise ValueError(\"Value must be an absolute path.\")\n value = os.path.join(main_config.homedir, value)\n\n if self.directory and not os.path.isdir(value):\n try:\n os.makedirs(value)\n except OSError:\n raise ValueError(\n \"Value must be an existing or creatable directory.\")\n if not self.directory and not os.path.isfile(value):\n try:\n open(value, 'w').close()\n except OSError:\n raise ValueError(\"Value must be an existant or creatable file.\")\n return value\n\n def serialize(self, main_config, this_section, value):\n self.parse(main_config, this_section, value)\n return value # So that it's still relative\n", "path": "sopel/config/types.py"}]}
3,784
527
gh_patches_debug_20958
rasdani/github-patches
git_diff
ansible__ansible-modules-core-4750
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fix #4412: os_security_group_rule support numbered protocols ##### ISSUE TYPE - Bugfix Pull Request ##### COMPONENT NAME cloud/openstack/os_security_group_rule.py ##### ANSIBLE VERSION ``` ansible 2.1.1.0 config file = configured module search path = Default w/o overrides ``` ##### SUMMARY Bugfix correctly points out lack of VRRP support by way of numbered protocol. This commit adds this functionality, including example usage in the documentation. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cloud/openstack/os_security_group_rule.py` Content: ``` 1 #!/usr/bin/python 2 3 # Copyright (c) 2015 Hewlett-Packard Development Company, L.P. 4 # Copyright (c) 2013, Benno Joy <[email protected]> 5 # 6 # This module is free software: you can redistribute it and/or modify 7 # it under the terms of the GNU General Public License as published by 8 # the Free Software Foundation, either version 3 of the License, or 9 # (at your option) any later version. 10 # 11 # This software is distributed in the hope that it will be useful, 12 # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 # GNU General Public License for more details. 15 # 16 # You should have received a copy of the GNU General Public License 17 # along with this software. If not, see <http://www.gnu.org/licenses/>. 18 19 try: 20 import shade 21 HAS_SHADE = True 22 except ImportError: 23 HAS_SHADE = False 24 25 26 DOCUMENTATION = ''' 27 --- 28 module: os_security_group_rule 29 short_description: Add/Delete rule from an existing security group 30 extends_documentation_fragment: openstack 31 version_added: "2.0" 32 description: 33 - Add or Remove rule from an existing security group 34 options: 35 security_group: 36 description: 37 - Name or ID of the security group 38 required: true 39 protocol: 40 description: 41 - IP protocols TCP UDP ICMP 112 (VRRP) 42 choices: ['tcp', 'udp', 'icmp', 112, None] 43 default: None 44 port_range_min: 45 description: 46 - Starting port 47 required: false 48 default: None 49 port_range_max: 50 description: 51 - Ending port 52 required: false 53 default: None 54 remote_ip_prefix: 55 description: 56 - Source IP address(es) in CIDR notation (exclusive with remote_group) 57 required: false 58 remote_group: 59 description: 60 - Name or ID of the Security group to link (exclusive with 61 remote_ip_prefix) 62 required: false 63 ethertype: 64 description: 65 - Must be IPv4 or IPv6, and addresses represented in CIDR must 66 match the ingress or egress rules. Not all providers support IPv6. 67 choices: ['IPv4', 'IPv6'] 68 default: IPv4 69 direction: 70 description: 71 - The direction in which the security group rule is applied. Not 72 all providers support egress. 73 choices: ['egress', 'ingress'] 74 default: ingress 75 state: 76 description: 77 - Should the resource be present or absent. 78 choices: [present, absent] 79 default: present 80 requirements: ["shade"] 81 ''' 82 83 EXAMPLES = ''' 84 # Create a security group rule 85 - os_security_group_rule: 86 cloud: mordred 87 security_group: foo 88 protocol: tcp 89 port_range_min: 80 90 port_range_max: 80 91 remote_ip_prefix: 0.0.0.0/0 92 93 # Create a security group rule for ping 94 - os_security_group_rule: 95 cloud: mordred 96 security_group: foo 97 protocol: icmp 98 remote_ip_prefix: 0.0.0.0/0 99 100 # Another way to create the ping rule 101 - os_security_group_rule: 102 cloud: mordred 103 security_group: foo 104 protocol: icmp 105 port_range_min: -1 106 port_range_max: -1 107 remote_ip_prefix: 0.0.0.0/0 108 109 # Create a TCP rule covering all ports 110 - os_security_group_rule: 111 cloud: mordred 112 security_group: foo 113 protocol: tcp 114 port_range_min: 1 115 port_range_max: 65535 116 remote_ip_prefix: 0.0.0.0/0 117 118 # Another way to create the TCP rule above (defaults to all ports) 119 - os_security_group_rule: 120 cloud: mordred 121 security_group: foo 122 protocol: tcp 123 remote_ip_prefix: 0.0.0.0/0 124 125 # Create a rule for VRRP with numbered protocol 112 126 - os_security_group_rule: 127 security_group: loadbalancer_sg 128 protocol: 112 129 remote_group: loadbalancer-node_sg 130 ''' 131 132 RETURN = ''' 133 id: 134 description: Unique rule UUID. 135 type: string 136 direction: 137 description: The direction in which the security group rule is applied. 138 type: string 139 sample: 'egress' 140 ethertype: 141 description: One of IPv4 or IPv6. 142 type: string 143 sample: 'IPv4' 144 port_range_min: 145 description: The minimum port number in the range that is matched by 146 the security group rule. 147 type: int 148 sample: 8000 149 port_range_max: 150 description: The maximum port number in the range that is matched by 151 the security group rule. 152 type: int 153 sample: 8000 154 protocol: 155 description: The protocol that is matched by the security group rule. 156 type: string 157 sample: 'tcp' 158 remote_ip_prefix: 159 description: The remote IP prefix to be associated with this security group rule. 160 type: string 161 sample: '0.0.0.0/0' 162 security_group_id: 163 description: The security group ID to associate with this security group rule. 164 type: string 165 ''' 166 167 168 def _ports_match(protocol, module_min, module_max, rule_min, rule_max): 169 """ 170 Capture the complex port matching logic. 171 172 The port values coming in for the module might be -1 (for ICMP), 173 which will work only for Nova, but this is handled by shade. Likewise, 174 they might be None, which works for Neutron, but not Nova. This too is 175 handled by shade. Since shade will consistently return these port 176 values as None, we need to convert any -1 values input to the module 177 to None here for comparison. 178 179 For TCP and UDP protocols, None values for both min and max are 180 represented as the range 1-65535 for Nova, but remain None for 181 Neutron. Shade returns the full range when Nova is the backend (since 182 that is how Nova stores them), and None values for Neutron. If None 183 values are input to the module for both values, then we need to adjust 184 for comparison. 185 """ 186 187 # Check if the user is supplying -1 for ICMP. 188 if protocol == 'icmp': 189 if module_min and int(module_min) == -1: 190 module_min = None 191 if module_max and int(module_max) == -1: 192 module_max = None 193 194 # Check if user is supplying None values for full TCP/UDP port range. 195 if protocol in ['tcp', 'udp'] and module_min is None and module_max is None: 196 if (rule_min and int(rule_min) == 1 197 and rule_max and int(rule_max) == 65535): 198 # (None, None) == (1, 65535) 199 return True 200 201 # Sanity check to make sure we don't have type comparison issues. 202 if module_min: 203 module_min = int(module_min) 204 if module_max: 205 module_max = int(module_max) 206 if rule_min: 207 rule_min = int(rule_min) 208 if rule_max: 209 rule_max = int(rule_max) 210 211 return module_min == rule_min and module_max == rule_max 212 213 214 def _find_matching_rule(module, secgroup, remotegroup): 215 """ 216 Find a rule in the group that matches the module parameters. 217 :returns: The matching rule dict, or None if no matches. 218 """ 219 protocol = module.params['protocol'] 220 remote_ip_prefix = module.params['remote_ip_prefix'] 221 ethertype = module.params['ethertype'] 222 direction = module.params['direction'] 223 remote_group_id = remotegroup['id'] 224 225 for rule in secgroup['security_group_rules']: 226 if (protocol == rule['protocol'] 227 and remote_ip_prefix == rule['remote_ip_prefix'] 228 and ethertype == rule['ethertype'] 229 and direction == rule['direction'] 230 and remote_group_id == rule['remote_group_id'] 231 and _ports_match(protocol, 232 module.params['port_range_min'], 233 module.params['port_range_max'], 234 rule['port_range_min'], 235 rule['port_range_max'])): 236 return rule 237 return None 238 239 240 def _system_state_change(module, secgroup, remotegroup): 241 state = module.params['state'] 242 if secgroup: 243 rule_exists = _find_matching_rule(module, secgroup, remotegroup) 244 else: 245 return False 246 247 if state == 'present' and not rule_exists: 248 return True 249 if state == 'absent' and rule_exists: 250 return True 251 return False 252 253 254 def main(): 255 argument_spec = openstack_full_argument_spec( 256 security_group = dict(required=True), 257 # NOTE(Shrews): None is an acceptable protocol value for 258 # Neutron, but Nova will balk at this. 259 protocol = dict(default=None, 260 choices=[None, 'tcp', 'udp', 'icmp', 112]), 261 port_range_min = dict(required=False, type='int'), 262 port_range_max = dict(required=False, type='int'), 263 remote_ip_prefix = dict(required=False, default=None), 264 remote_group = dict(required=False, default=None), 265 ethertype = dict(default='IPv4', 266 choices=['IPv4', 'IPv6']), 267 direction = dict(default='ingress', 268 choices=['egress', 'ingress']), 269 state = dict(default='present', 270 choices=['absent', 'present']), 271 ) 272 273 module_kwargs = openstack_module_kwargs( 274 mutually_exclusive=[ 275 ['remote_ip_prefix', 'remote_group'], 276 ] 277 ) 278 279 module = AnsibleModule(argument_spec, 280 supports_check_mode=True, 281 **module_kwargs) 282 283 if not HAS_SHADE: 284 module.fail_json(msg='shade is required for this module') 285 286 state = module.params['state'] 287 security_group = module.params['security_group'] 288 remote_group = module.params['remote_group'] 289 changed = False 290 291 try: 292 cloud = shade.openstack_cloud(**module.params) 293 secgroup = cloud.get_security_group(security_group) 294 295 if remote_group: 296 remotegroup = cloud.get_security_group(remote_group) 297 else: 298 remotegroup = { 'id' : None } 299 300 if module.check_mode: 301 module.exit_json(changed=_system_state_change(module, secgroup, remotegroup)) 302 303 if state == 'present': 304 if not secgroup: 305 module.fail_json(msg='Could not find security group %s' % 306 security_group) 307 308 rule = _find_matching_rule(module, secgroup, remotegroup) 309 if not rule: 310 rule = cloud.create_security_group_rule( 311 secgroup['id'], 312 port_range_min=module.params['port_range_min'], 313 port_range_max=module.params['port_range_max'], 314 protocol=module.params['protocol'], 315 remote_ip_prefix=module.params['remote_ip_prefix'], 316 remote_group_id=remotegroup['id'], 317 direction=module.params['direction'], 318 ethertype=module.params['ethertype'] 319 ) 320 changed = True 321 module.exit_json(changed=changed, rule=rule, id=rule['id']) 322 323 if state == 'absent' and secgroup: 324 rule = _find_matching_rule(module, secgroup, remotegroup) 325 if rule: 326 cloud.delete_security_group_rule(rule['id']) 327 changed = True 328 329 module.exit_json(changed=changed) 330 331 except shade.OpenStackCloudException as e: 332 module.fail_json(msg=str(e)) 333 334 # this is magic, see lib/ansible/module_common.py 335 from ansible.module_utils.basic import * 336 from ansible.module_utils.openstack import * 337 338 if __name__ == '__main__': 339 main() 340 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -39,7 +39,7 @@ protocol: description: - IP protocols TCP UDP ICMP 112 (VRRP) - choices: ['tcp', 'udp', 'icmp', 112, None] + choices: ['tcp', 'udp', 'icmp', '112', None] default: None port_range_min: description: @@ -257,7 +257,7 @@ # NOTE(Shrews): None is an acceptable protocol value for # Neutron, but Nova will balk at this. protocol = dict(default=None, - choices=[None, 'tcp', 'udp', 'icmp', 112]), + choices=[None, 'tcp', 'udp', 'icmp', '112']), port_range_min = dict(required=False, type='int'), port_range_max = dict(required=False, type='int'), remote_ip_prefix = dict(required=False, default=None),
{"golden_diff": "diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py\n--- a/cloud/openstack/os_security_group_rule.py\n+++ b/cloud/openstack/os_security_group_rule.py\n@@ -39,7 +39,7 @@\n protocol:\n description:\n - IP protocols TCP UDP ICMP 112 (VRRP)\n- choices: ['tcp', 'udp', 'icmp', 112, None]\n+ choices: ['tcp', 'udp', 'icmp', '112', None]\n default: None\n port_range_min:\n description:\n@@ -257,7 +257,7 @@\n # NOTE(Shrews): None is an acceptable protocol value for\n # Neutron, but Nova will balk at this.\n protocol = dict(default=None,\n- choices=[None, 'tcp', 'udp', 'icmp', 112]),\n+ choices=[None, 'tcp', 'udp', 'icmp', '112']),\n port_range_min = dict(required=False, type='int'),\n port_range_max = dict(required=False, type='int'),\n remote_ip_prefix = dict(required=False, default=None),\n", "issue": "Fix #4412: os_security_group_rule support numbered protocols\n##### ISSUE TYPE\n- Bugfix Pull Request\n##### COMPONENT NAME\n\ncloud/openstack/os_security_group_rule.py\n##### ANSIBLE VERSION\n\n```\nansible 2.1.1.0\n config file = \n configured module search path = Default w/o overrides\n```\n##### SUMMARY\n\nBugfix correctly points out lack of VRRP support by way of numbered protocol. This commit adds this functionality, including example usage in the documentation.\n\n", "before_files": [{"content": "#!/usr/bin/python\n\n# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.\n# Copyright (c) 2013, Benno Joy <[email protected]>\n#\n# This module is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This software is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this software. If not, see <http://www.gnu.org/licenses/>.\n\ntry:\n import shade\n HAS_SHADE = True\nexcept ImportError:\n HAS_SHADE = False\n\n\nDOCUMENTATION = '''\n---\nmodule: os_security_group_rule\nshort_description: Add/Delete rule from an existing security group\nextends_documentation_fragment: openstack\nversion_added: \"2.0\"\ndescription:\n - Add or Remove rule from an existing security group\noptions:\n security_group:\n description:\n - Name or ID of the security group\n required: true\n protocol:\n description:\n - IP protocols TCP UDP ICMP 112 (VRRP)\n choices: ['tcp', 'udp', 'icmp', 112, None]\n default: None\n port_range_min:\n description:\n - Starting port\n required: false\n default: None\n port_range_max:\n description:\n - Ending port\n required: false\n default: None\n remote_ip_prefix:\n description:\n - Source IP address(es) in CIDR notation (exclusive with remote_group)\n required: false\n remote_group:\n description:\n - Name or ID of the Security group to link (exclusive with\n remote_ip_prefix)\n required: false\n ethertype:\n description:\n - Must be IPv4 or IPv6, and addresses represented in CIDR must\n match the ingress or egress rules. Not all providers support IPv6.\n choices: ['IPv4', 'IPv6']\n default: IPv4\n direction:\n description:\n - The direction in which the security group rule is applied. Not\n all providers support egress.\n choices: ['egress', 'ingress']\n default: ingress\n state:\n description:\n - Should the resource be present or absent.\n choices: [present, absent]\n default: present\nrequirements: [\"shade\"]\n'''\n\nEXAMPLES = '''\n# Create a security group rule\n- os_security_group_rule:\n cloud: mordred\n security_group: foo\n protocol: tcp\n port_range_min: 80\n port_range_max: 80\n remote_ip_prefix: 0.0.0.0/0\n\n# Create a security group rule for ping\n- os_security_group_rule:\n cloud: mordred\n security_group: foo\n protocol: icmp\n remote_ip_prefix: 0.0.0.0/0\n\n# Another way to create the ping rule\n- os_security_group_rule:\n cloud: mordred\n security_group: foo\n protocol: icmp\n port_range_min: -1\n port_range_max: -1\n remote_ip_prefix: 0.0.0.0/0\n\n# Create a TCP rule covering all ports\n- os_security_group_rule:\n cloud: mordred\n security_group: foo\n protocol: tcp\n port_range_min: 1\n port_range_max: 65535\n remote_ip_prefix: 0.0.0.0/0\n\n# Another way to create the TCP rule above (defaults to all ports)\n- os_security_group_rule:\n cloud: mordred\n security_group: foo\n protocol: tcp\n remote_ip_prefix: 0.0.0.0/0\n\n# Create a rule for VRRP with numbered protocol 112\n- os_security_group_rule:\n security_group: loadbalancer_sg\n protocol: 112\n remote_group: loadbalancer-node_sg\n'''\n\nRETURN = '''\nid:\n description: Unique rule UUID.\n type: string\ndirection:\n description: The direction in which the security group rule is applied.\n type: string\n sample: 'egress'\nethertype:\n description: One of IPv4 or IPv6.\n type: string\n sample: 'IPv4'\nport_range_min:\n description: The minimum port number in the range that is matched by\n the security group rule.\n type: int\n sample: 8000\nport_range_max:\n description: The maximum port number in the range that is matched by\n the security group rule.\n type: int\n sample: 8000\nprotocol:\n description: The protocol that is matched by the security group rule.\n type: string\n sample: 'tcp'\nremote_ip_prefix:\n description: The remote IP prefix to be associated with this security group rule.\n type: string\n sample: '0.0.0.0/0'\nsecurity_group_id:\n description: The security group ID to associate with this security group rule.\n type: string\n'''\n\n\ndef _ports_match(protocol, module_min, module_max, rule_min, rule_max):\n \"\"\"\n Capture the complex port matching logic.\n\n The port values coming in for the module might be -1 (for ICMP),\n which will work only for Nova, but this is handled by shade. Likewise,\n they might be None, which works for Neutron, but not Nova. This too is\n handled by shade. Since shade will consistently return these port\n values as None, we need to convert any -1 values input to the module\n to None here for comparison.\n\n For TCP and UDP protocols, None values for both min and max are\n represented as the range 1-65535 for Nova, but remain None for\n Neutron. Shade returns the full range when Nova is the backend (since\n that is how Nova stores them), and None values for Neutron. If None\n values are input to the module for both values, then we need to adjust\n for comparison.\n \"\"\"\n\n # Check if the user is supplying -1 for ICMP.\n if protocol == 'icmp':\n if module_min and int(module_min) == -1:\n module_min = None\n if module_max and int(module_max) == -1:\n module_max = None\n\n # Check if user is supplying None values for full TCP/UDP port range.\n if protocol in ['tcp', 'udp'] and module_min is None and module_max is None:\n if (rule_min and int(rule_min) == 1\n and rule_max and int(rule_max) == 65535):\n # (None, None) == (1, 65535)\n return True\n\n # Sanity check to make sure we don't have type comparison issues.\n if module_min:\n module_min = int(module_min)\n if module_max:\n module_max = int(module_max)\n if rule_min:\n rule_min = int(rule_min)\n if rule_max:\n rule_max = int(rule_max)\n\n return module_min == rule_min and module_max == rule_max\n\n\ndef _find_matching_rule(module, secgroup, remotegroup):\n \"\"\"\n Find a rule in the group that matches the module parameters.\n :returns: The matching rule dict, or None if no matches.\n \"\"\"\n protocol = module.params['protocol']\n remote_ip_prefix = module.params['remote_ip_prefix']\n ethertype = module.params['ethertype']\n direction = module.params['direction']\n remote_group_id = remotegroup['id']\n\n for rule in secgroup['security_group_rules']:\n if (protocol == rule['protocol']\n and remote_ip_prefix == rule['remote_ip_prefix']\n and ethertype == rule['ethertype']\n and direction == rule['direction']\n and remote_group_id == rule['remote_group_id']\n and _ports_match(protocol,\n module.params['port_range_min'],\n module.params['port_range_max'],\n rule['port_range_min'],\n rule['port_range_max'])):\n return rule\n return None\n\n\ndef _system_state_change(module, secgroup, remotegroup):\n state = module.params['state']\n if secgroup:\n rule_exists = _find_matching_rule(module, secgroup, remotegroup)\n else:\n return False\n\n if state == 'present' and not rule_exists:\n return True\n if state == 'absent' and rule_exists:\n return True\n return False\n\n\ndef main():\n argument_spec = openstack_full_argument_spec(\n security_group = dict(required=True),\n # NOTE(Shrews): None is an acceptable protocol value for\n # Neutron, but Nova will balk at this.\n protocol = dict(default=None,\n choices=[None, 'tcp', 'udp', 'icmp', 112]),\n port_range_min = dict(required=False, type='int'),\n port_range_max = dict(required=False, type='int'),\n remote_ip_prefix = dict(required=False, default=None),\n remote_group = dict(required=False, default=None),\n ethertype = dict(default='IPv4',\n choices=['IPv4', 'IPv6']),\n direction = dict(default='ingress',\n choices=['egress', 'ingress']),\n state = dict(default='present',\n choices=['absent', 'present']),\n )\n\n module_kwargs = openstack_module_kwargs(\n mutually_exclusive=[\n ['remote_ip_prefix', 'remote_group'],\n ]\n )\n\n module = AnsibleModule(argument_spec,\n supports_check_mode=True,\n **module_kwargs)\n\n if not HAS_SHADE:\n module.fail_json(msg='shade is required for this module')\n\n state = module.params['state']\n security_group = module.params['security_group']\n remote_group = module.params['remote_group']\n changed = False\n\n try:\n cloud = shade.openstack_cloud(**module.params)\n secgroup = cloud.get_security_group(security_group)\n\n if remote_group:\n remotegroup = cloud.get_security_group(remote_group)\n else:\n remotegroup = { 'id' : None }\n\n if module.check_mode:\n module.exit_json(changed=_system_state_change(module, secgroup, remotegroup))\n\n if state == 'present':\n if not secgroup:\n module.fail_json(msg='Could not find security group %s' %\n security_group)\n\n rule = _find_matching_rule(module, secgroup, remotegroup)\n if not rule:\n rule = cloud.create_security_group_rule(\n secgroup['id'],\n port_range_min=module.params['port_range_min'],\n port_range_max=module.params['port_range_max'],\n protocol=module.params['protocol'],\n remote_ip_prefix=module.params['remote_ip_prefix'],\n remote_group_id=remotegroup['id'],\n direction=module.params['direction'],\n ethertype=module.params['ethertype']\n )\n changed = True\n module.exit_json(changed=changed, rule=rule, id=rule['id'])\n\n if state == 'absent' and secgroup:\n rule = _find_matching_rule(module, secgroup, remotegroup)\n if rule:\n cloud.delete_security_group_rule(rule['id'])\n changed = True\n\n module.exit_json(changed=changed)\n\n except shade.OpenStackCloudException as e:\n module.fail_json(msg=str(e))\n\n# this is magic, see lib/ansible/module_common.py\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.openstack import *\n\nif __name__ == '__main__':\n main()\n", "path": "cloud/openstack/os_security_group_rule.py"}], "after_files": [{"content": "#!/usr/bin/python\n\n# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.\n# Copyright (c) 2013, Benno Joy <[email protected]>\n#\n# This module is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This software is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this software. If not, see <http://www.gnu.org/licenses/>.\n\ntry:\n import shade\n HAS_SHADE = True\nexcept ImportError:\n HAS_SHADE = False\n\n\nDOCUMENTATION = '''\n---\nmodule: os_security_group_rule\nshort_description: Add/Delete rule from an existing security group\nextends_documentation_fragment: openstack\nversion_added: \"2.0\"\ndescription:\n - Add or Remove rule from an existing security group\noptions:\n security_group:\n description:\n - Name or ID of the security group\n required: true\n protocol:\n description:\n - IP protocols TCP UDP ICMP 112 (VRRP)\n choices: ['tcp', 'udp', 'icmp', '112', None]\n default: None\n port_range_min:\n description:\n - Starting port\n required: false\n default: None\n port_range_max:\n description:\n - Ending port\n required: false\n default: None\n remote_ip_prefix:\n description:\n - Source IP address(es) in CIDR notation (exclusive with remote_group)\n required: false\n remote_group:\n description:\n - Name or ID of the Security group to link (exclusive with\n remote_ip_prefix)\n required: false\n ethertype:\n description:\n - Must be IPv4 or IPv6, and addresses represented in CIDR must\n match the ingress or egress rules. Not all providers support IPv6.\n choices: ['IPv4', 'IPv6']\n default: IPv4\n direction:\n description:\n - The direction in which the security group rule is applied. Not\n all providers support egress.\n choices: ['egress', 'ingress']\n default: ingress\n state:\n description:\n - Should the resource be present or absent.\n choices: [present, absent]\n default: present\nrequirements: [\"shade\"]\n'''\n\nEXAMPLES = '''\n# Create a security group rule\n- os_security_group_rule:\n cloud: mordred\n security_group: foo\n protocol: tcp\n port_range_min: 80\n port_range_max: 80\n remote_ip_prefix: 0.0.0.0/0\n\n# Create a security group rule for ping\n- os_security_group_rule:\n cloud: mordred\n security_group: foo\n protocol: icmp\n remote_ip_prefix: 0.0.0.0/0\n\n# Another way to create the ping rule\n- os_security_group_rule:\n cloud: mordred\n security_group: foo\n protocol: icmp\n port_range_min: -1\n port_range_max: -1\n remote_ip_prefix: 0.0.0.0/0\n\n# Create a TCP rule covering all ports\n- os_security_group_rule:\n cloud: mordred\n security_group: foo\n protocol: tcp\n port_range_min: 1\n port_range_max: 65535\n remote_ip_prefix: 0.0.0.0/0\n\n# Another way to create the TCP rule above (defaults to all ports)\n- os_security_group_rule:\n cloud: mordred\n security_group: foo\n protocol: tcp\n remote_ip_prefix: 0.0.0.0/0\n\n# Create a rule for VRRP with numbered protocol 112\n- os_security_group_rule:\n security_group: loadbalancer_sg\n protocol: 112\n remote_group: loadbalancer-node_sg\n'''\n\nRETURN = '''\nid:\n description: Unique rule UUID.\n type: string\ndirection:\n description: The direction in which the security group rule is applied.\n type: string\n sample: 'egress'\nethertype:\n description: One of IPv4 or IPv6.\n type: string\n sample: 'IPv4'\nport_range_min:\n description: The minimum port number in the range that is matched by\n the security group rule.\n type: int\n sample: 8000\nport_range_max:\n description: The maximum port number in the range that is matched by\n the security group rule.\n type: int\n sample: 8000\nprotocol:\n description: The protocol that is matched by the security group rule.\n type: string\n sample: 'tcp'\nremote_ip_prefix:\n description: The remote IP prefix to be associated with this security group rule.\n type: string\n sample: '0.0.0.0/0'\nsecurity_group_id:\n description: The security group ID to associate with this security group rule.\n type: string\n'''\n\n\ndef _ports_match(protocol, module_min, module_max, rule_min, rule_max):\n \"\"\"\n Capture the complex port matching logic.\n\n The port values coming in for the module might be -1 (for ICMP),\n which will work only for Nova, but this is handled by shade. Likewise,\n they might be None, which works for Neutron, but not Nova. This too is\n handled by shade. Since shade will consistently return these port\n values as None, we need to convert any -1 values input to the module\n to None here for comparison.\n\n For TCP and UDP protocols, None values for both min and max are\n represented as the range 1-65535 for Nova, but remain None for\n Neutron. Shade returns the full range when Nova is the backend (since\n that is how Nova stores them), and None values for Neutron. If None\n values are input to the module for both values, then we need to adjust\n for comparison.\n \"\"\"\n\n # Check if the user is supplying -1 for ICMP.\n if protocol == 'icmp':\n if module_min and int(module_min) == -1:\n module_min = None\n if module_max and int(module_max) == -1:\n module_max = None\n\n # Check if user is supplying None values for full TCP/UDP port range.\n if protocol in ['tcp', 'udp'] and module_min is None and module_max is None:\n if (rule_min and int(rule_min) == 1\n and rule_max and int(rule_max) == 65535):\n # (None, None) == (1, 65535)\n return True\n\n # Sanity check to make sure we don't have type comparison issues.\n if module_min:\n module_min = int(module_min)\n if module_max:\n module_max = int(module_max)\n if rule_min:\n rule_min = int(rule_min)\n if rule_max:\n rule_max = int(rule_max)\n\n return module_min == rule_min and module_max == rule_max\n\n\ndef _find_matching_rule(module, secgroup, remotegroup):\n \"\"\"\n Find a rule in the group that matches the module parameters.\n :returns: The matching rule dict, or None if no matches.\n \"\"\"\n protocol = module.params['protocol']\n remote_ip_prefix = module.params['remote_ip_prefix']\n ethertype = module.params['ethertype']\n direction = module.params['direction']\n remote_group_id = remotegroup['id']\n\n for rule in secgroup['security_group_rules']:\n if (protocol == rule['protocol']\n and remote_ip_prefix == rule['remote_ip_prefix']\n and ethertype == rule['ethertype']\n and direction == rule['direction']\n and remote_group_id == rule['remote_group_id']\n and _ports_match(protocol,\n module.params['port_range_min'],\n module.params['port_range_max'],\n rule['port_range_min'],\n rule['port_range_max'])):\n return rule\n return None\n\n\ndef _system_state_change(module, secgroup, remotegroup):\n state = module.params['state']\n if secgroup:\n rule_exists = _find_matching_rule(module, secgroup, remotegroup)\n else:\n return False\n\n if state == 'present' and not rule_exists:\n return True\n if state == 'absent' and rule_exists:\n return True\n return False\n\n\ndef main():\n argument_spec = openstack_full_argument_spec(\n security_group = dict(required=True),\n # NOTE(Shrews): None is an acceptable protocol value for\n # Neutron, but Nova will balk at this.\n protocol = dict(default=None,\n choices=[None, 'tcp', 'udp', 'icmp', '112']),\n port_range_min = dict(required=False, type='int'),\n port_range_max = dict(required=False, type='int'),\n remote_ip_prefix = dict(required=False, default=None),\n remote_group = dict(required=False, default=None),\n ethertype = dict(default='IPv4',\n choices=['IPv4', 'IPv6']),\n direction = dict(default='ingress',\n choices=['egress', 'ingress']),\n state = dict(default='present',\n choices=['absent', 'present']),\n )\n\n module_kwargs = openstack_module_kwargs(\n mutually_exclusive=[\n ['remote_ip_prefix', 'remote_group'],\n ]\n )\n\n module = AnsibleModule(argument_spec,\n supports_check_mode=True,\n **module_kwargs)\n\n if not HAS_SHADE:\n module.fail_json(msg='shade is required for this module')\n\n state = module.params['state']\n security_group = module.params['security_group']\n remote_group = module.params['remote_group']\n changed = False\n\n try:\n cloud = shade.openstack_cloud(**module.params)\n secgroup = cloud.get_security_group(security_group)\n\n if remote_group:\n remotegroup = cloud.get_security_group(remote_group)\n else:\n remotegroup = { 'id' : None }\n\n if module.check_mode:\n module.exit_json(changed=_system_state_change(module, secgroup, remotegroup))\n\n if state == 'present':\n if not secgroup:\n module.fail_json(msg='Could not find security group %s' %\n security_group)\n\n rule = _find_matching_rule(module, secgroup, remotegroup)\n if not rule:\n rule = cloud.create_security_group_rule(\n secgroup['id'],\n port_range_min=module.params['port_range_min'],\n port_range_max=module.params['port_range_max'],\n protocol=module.params['protocol'],\n remote_ip_prefix=module.params['remote_ip_prefix'],\n remote_group_id=remotegroup['id'],\n direction=module.params['direction'],\n ethertype=module.params['ethertype']\n )\n changed = True\n module.exit_json(changed=changed, rule=rule, id=rule['id'])\n\n if state == 'absent' and secgroup:\n rule = _find_matching_rule(module, secgroup, remotegroup)\n if rule:\n cloud.delete_security_group_rule(rule['id'])\n changed = True\n\n module.exit_json(changed=changed)\n\n except shade.OpenStackCloudException as e:\n module.fail_json(msg=str(e))\n\n# this is magic, see lib/ansible/module_common.py\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.openstack import *\n\nif __name__ == '__main__':\n main()\n", "path": "cloud/openstack/os_security_group_rule.py"}]}
3,890
261
gh_patches_debug_38258
rasdani/github-patches
git_diff
TencentBlueKing__bk-user-408
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [日志] auth fail的日志没有区分度 大量的相同的错误信息, 无法快速精确答复`调用方`的疑惑 ``` msg = "Invalid token header. No credentials provided." ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/api/bkuser_core/enhanced_account/authentication.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 """ 3 TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available. 4 Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved. 5 Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. 6 You may obtain a copy of the License at http://opensource.org/licenses/MIT 7 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on 8 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the 9 specific language governing permissions and limitations under the License. 10 """ 11 import base64 12 import logging 13 import re 14 15 import jwt 16 from django.conf import settings 17 from django.contrib.auth import get_user_model 18 from django.core.cache import caches 19 from rest_framework import exceptions 20 from rest_framework.authentication import BaseAuthentication, get_authorization_header 21 22 from bkuser_core.esb_sdk.shortcuts import get_client_by_raw_username 23 24 logger = logging.getLogger(__name__) 25 26 27 HEADER_JWT_KEY_NAME = "HTTP_X_BKAPI_JWT" 28 HEADER_APP_CODE_KEY_NAME = "HTTP_X_BK_APP_CODE" 29 HEADER_APP_SECRET_KEY_NAME = "HTTP_X_BK_APP_SECRET" 30 31 ESB_PUBLIC_KEY_CACHE_KEY = "bk_user:esb_public_key" 32 33 TOKEN_KEY_NAME = "token" 34 35 36 def create_user(username="admin"): 37 return get_user_model()(username=username, is_staff=True, is_superuser=True) 38 39 40 class InternalTokenAuthentication(BaseAuthentication): 41 42 keyword = "iBearer" 43 model = None 44 45 query_params_keyword = TOKEN_KEY_NAME 46 47 def get_token_from_query_params(self, request): 48 try: 49 return request.query_params[self.query_params_keyword] 50 except KeyError: 51 msg = "Invalid token header. No credentials provided." 52 raise exceptions.AuthenticationFailed(msg) 53 54 def get_token_from_header(self, request): 55 auth = get_authorization_header(request).split() 56 57 if not auth or auth[0].lower() != self.keyword.lower().encode(): 58 msg = "Invalid token header. No credentials provided." 59 raise exceptions.AuthenticationFailed(msg) 60 61 if len(auth) == 1: 62 msg = "Invalid token header. No credentials provided." 63 raise exceptions.AuthenticationFailed(msg) 64 elif len(auth) > 2: 65 msg = "Invalid token header. Token string should not contain spaces." 66 raise exceptions.AuthenticationFailed(msg) 67 68 try: 69 token = auth[1].decode() 70 except UnicodeError: 71 msg = "Invalid token header. Token string should not contain invalid characters." 72 raise exceptions.AuthenticationFailed(msg) 73 74 return token 75 76 def authenticate(self, request): 77 for white_url in settings.AUTH_EXEMPT_PATHS: 78 if re.search(white_url, request.path): 79 logger.info("%s path in white_url<%s>, exempting auth", request.path, white_url) 80 return None, None 81 82 try: 83 token = self.get_token_from_query_params(request) 84 except exceptions.AuthenticationFailed: 85 logger.debug("no token from query params, trying to get from header instead") 86 token = self.get_token_from_header(request) 87 88 return self.authenticate_credentials(token) 89 90 def authenticate_credentials(self, key): 91 """Use access token to identify user""" 92 if key in settings.INTERNAL_AUTH_TOKENS: 93 user_info = settings.INTERNAL_AUTH_TOKENS[key] 94 return create_user(user_info["username"]), None 95 raise exceptions.AuthenticationFailed("request failed: Invalid token header. No credentials provided.") 96 97 98 class ESBOrAPIGatewayAuthentication(BaseAuthentication): 99 def authenticate(self, request): 100 # get jwt from header 101 jwt_content = request.META.get(HEADER_JWT_KEY_NAME, "") 102 if not jwt_content: 103 return None, None 104 105 # get the public key 106 jwt_header = jwt.get_unverified_header(jwt_content) 107 api_name = jwt_header.get("kid") or "" 108 public_key = self._get_public_key(api_name) 109 algorithm = jwt_header.get("alg") or "RS512" 110 111 # do decode, without verify issuer 112 try: 113 jwt_playload = jwt.decode(jwt_content, public_key, algorithm) 114 except Exception: # pylint: disable=broad-except 115 logger.exception("JWT decode failed! jwt_payload: %s, public_key: %s", jwt_content, public_key) 116 raise exceptions.AuthenticationFailed("decode jwt token fail") 117 118 # username = self._get_username_from_jwt_payload(payload) 119 app_code = self._get_app_code_from_jwt_payload(jwt_playload) 120 request.bk_app_code = app_code 121 122 username = "APIGW" if api_name == settings.BK_APIGW_NAME else "ESB" 123 return create_user(username), None 124 125 def _get_public_key(self, api_name): 126 # it's from apigateway 127 if api_name == settings.BK_APIGW_NAME: 128 return self._get_apigw_public_key() 129 # it's from esb 130 else: 131 return self._get_esb_public_key() 132 133 def _get_apigw_public_key(self): 134 """ 135 获取APIGW的PUBLIC KEY 136 由于配置文件里的public key 是来着环境变量,且使用了base64编码的,所以需要获取后解码 137 """ 138 # 如果BK_APIGW_PUBLIC_KEY为空,则直接报错 139 if not settings.BK_APIGW_PUBLIC_KEY: 140 raise exceptions.AuthenticationFailed("BK_APIGW_PUBLIC_KEY can not be empty") 141 142 # base64解码 143 try: 144 public_key = base64.b64decode(settings.BK_APIGW_PUBLIC_KEY).decode("utf-8") 145 except Exception: # pylint: disable=broad-except 146 logger.exception( 147 "BK_APIGW_PUBLIC_KEY is not valid base64 string! public_key=%s", settings.BK_APIGW_PUBLIC_KEY 148 ) 149 raise exceptions.AuthenticationFailed("BK_APIGW_PUBLIC_KEY is not valid base64 string!") 150 151 return public_key 152 153 def _get_esb_public_key(self): 154 cache = caches["locmem"] 155 public_key = cache.get(ESB_PUBLIC_KEY_CACHE_KEY) 156 if public_key is not None: 157 return public_key 158 159 # get from esb 160 client = get_client_by_raw_username("admin") 161 try: 162 ret = client.esb.get_public_key() 163 except Exception: # pylint: disable=broad-except 164 logger.exception("Get ESB Public Key failed!") 165 raise exceptions.AuthenticationFailed("Get ESB Public Key failed!") 166 167 if not ret.get("result", False): 168 msg = ret.get("message", "unknown error") 169 logger.error("Get ESB Public Key failed! %s", msg) 170 raise exceptions.AuthenticationFailed(f"Get ESB Public Key failed! {msg}") 171 172 public_key = ret.get("data", {}).get("public_key", "") 173 if public_key is None: 174 logger.error("Get ESB Public Key failed! public_key is empty, ret=%s", ret) 175 raise exceptions.AuthenticationFailed("Get ESB Public Key failed! the public key is empty") 176 177 cache.set(ESB_PUBLIC_KEY_CACHE_KEY, public_key, 60 * 60) 178 return public_key 179 180 def _get_app_code_from_jwt_payload(self, jwt_payload): 181 """从jwt里获取app_code""" 182 app = jwt_payload.get("app", {}) 183 184 verified = app.get("verified", False) 185 if not verified: 186 raise exceptions.AuthenticationFailed("app is not verified") 187 188 app_code = app.get("bk_app_code", "") or app.get("app_code", "") 189 # 虽然app_code为空对于后续的鉴权一定是不通过的,但鉴权不通过有很多原因,这里提前log便于问题排查 190 if not app_code: 191 raise exceptions.AuthenticationFailed("could not get app_code from esb/apigateway jwt payload! it's empty") 192 193 return app_code 194 195 196 class AppCodeAppSecretAuthentication(BaseAuthentication): 197 """ 198 通过app_code和app_secret进行鉴权 199 """ 200 201 def authenticate(self, request): 202 # get app_code and app_secret from header 203 app_code = request.META.get(HEADER_APP_CODE_KEY_NAME, "") 204 app_secret = request.META.get(HEADER_APP_SECRET_KEY_NAME, "") 205 206 if app_code == settings.APP_ID and app_secret == settings.APP_TOKEN: 207 return create_user("SAAS"), None 208 209 return None, None 210 211 212 class MultipleAuthentication(BaseAuthentication): 213 """it's a dispatcher""" 214 215 def authenticate(self, request): 216 # FIXME: 最终, 下掉token, 只保留 jwt + app_code/app_secret 217 # withe list 218 for white_url in settings.AUTH_EXEMPT_PATHS: 219 if re.search(white_url, request.path): 220 logger.debug("%s path in white_url<%s>, exempting auth", request.path, white_url) 221 return None, None 222 223 # app_code and app_secret 224 if HEADER_APP_CODE_KEY_NAME in request.META and HEADER_APP_SECRET_KEY_NAME in request.META: 225 return AppCodeAppSecretAuthentication().authenticate(request) 226 227 # FIXME: should remove this totally 228 # NOTE: some case we want to use token as credentials, call through APIGateway(set default headers) 229 # so we should verify others first, not jwt 230 if get_authorization_header(request) or request.query_params.get(TOKEN_KEY_NAME): 231 # token 232 return InternalTokenAuthentication().authenticate(request) 233 234 # jwt 235 if HEADER_JWT_KEY_NAME in request.META: 236 return ESBOrAPIGatewayAuthentication().authenticate(request) 237 238 raise exceptions.AuthenticationFailed( 239 "no valid authentication credentials provided! should call through APIGateway/ESB" 240 ) 241 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/api/bkuser_core/enhanced_account/authentication.py b/src/api/bkuser_core/enhanced_account/authentication.py --- a/src/api/bkuser_core/enhanced_account/authentication.py +++ b/src/api/bkuser_core/enhanced_account/authentication.py @@ -48,21 +48,24 @@ try: return request.query_params[self.query_params_keyword] except KeyError: - msg = "Invalid token header. No credentials provided." + msg = f"Invalid token header. No credentials provided. {self.query_params_keyword} is not in query params" raise exceptions.AuthenticationFailed(msg) def get_token_from_header(self, request): auth = get_authorization_header(request).split() if not auth or auth[0].lower() != self.keyword.lower().encode(): - msg = "Invalid token header. No credentials provided." + msg = "Invalid token header. No credentials provided. The format should be `iBearer THE_TOKEN`" raise exceptions.AuthenticationFailed(msg) if len(auth) == 1: - msg = "Invalid token header. No credentials provided." + msg = "Invalid token header. No credentials provided. The size of auth array credentials is 0" raise exceptions.AuthenticationFailed(msg) elif len(auth) > 2: - msg = "Invalid token header. Token string should not contain spaces." + msg = ( + "Invalid token header. Token string should not contain spaces. " + + "The size of auth array credentials is more than 2" + ) raise exceptions.AuthenticationFailed(msg) try: @@ -74,11 +77,6 @@ return token def authenticate(self, request): - for white_url in settings.AUTH_EXEMPT_PATHS: - if re.search(white_url, request.path): - logger.info("%s path in white_url<%s>, exempting auth", request.path, white_url) - return None, None - try: token = self.get_token_from_query_params(request) except exceptions.AuthenticationFailed: @@ -92,7 +90,9 @@ if key in settings.INTERNAL_AUTH_TOKENS: user_info = settings.INTERNAL_AUTH_TOKENS[key] return create_user(user_info["username"]), None - raise exceptions.AuthenticationFailed("request failed: Invalid token header. No credentials provided.") + raise exceptions.AuthenticationFailed( + "request failed: Invalid token header. No credentials provided or Wrong credentials." + ) class ESBOrAPIGatewayAuthentication(BaseAuthentication):
{"golden_diff": "diff --git a/src/api/bkuser_core/enhanced_account/authentication.py b/src/api/bkuser_core/enhanced_account/authentication.py\n--- a/src/api/bkuser_core/enhanced_account/authentication.py\n+++ b/src/api/bkuser_core/enhanced_account/authentication.py\n@@ -48,21 +48,24 @@\n try:\n return request.query_params[self.query_params_keyword]\n except KeyError:\n- msg = \"Invalid token header. No credentials provided.\"\n+ msg = f\"Invalid token header. No credentials provided. {self.query_params_keyword} is not in query params\"\n raise exceptions.AuthenticationFailed(msg)\n \n def get_token_from_header(self, request):\n auth = get_authorization_header(request).split()\n \n if not auth or auth[0].lower() != self.keyword.lower().encode():\n- msg = \"Invalid token header. No credentials provided.\"\n+ msg = \"Invalid token header. No credentials provided. The format should be `iBearer THE_TOKEN`\"\n raise exceptions.AuthenticationFailed(msg)\n \n if len(auth) == 1:\n- msg = \"Invalid token header. No credentials provided.\"\n+ msg = \"Invalid token header. No credentials provided. The size of auth array credentials is 0\"\n raise exceptions.AuthenticationFailed(msg)\n elif len(auth) > 2:\n- msg = \"Invalid token header. Token string should not contain spaces.\"\n+ msg = (\n+ \"Invalid token header. Token string should not contain spaces. \"\n+ + \"The size of auth array credentials is more than 2\"\n+ )\n raise exceptions.AuthenticationFailed(msg)\n \n try:\n@@ -74,11 +77,6 @@\n return token\n \n def authenticate(self, request):\n- for white_url in settings.AUTH_EXEMPT_PATHS:\n- if re.search(white_url, request.path):\n- logger.info(\"%s path in white_url<%s>, exempting auth\", request.path, white_url)\n- return None, None\n-\n try:\n token = self.get_token_from_query_params(request)\n except exceptions.AuthenticationFailed:\n@@ -92,7 +90,9 @@\n if key in settings.INTERNAL_AUTH_TOKENS:\n user_info = settings.INTERNAL_AUTH_TOKENS[key]\n return create_user(user_info[\"username\"]), None\n- raise exceptions.AuthenticationFailed(\"request failed: Invalid token header. No credentials provided.\")\n+ raise exceptions.AuthenticationFailed(\n+ \"request failed: Invalid token header. No credentials provided or Wrong credentials.\"\n+ )\n \n \n class ESBOrAPIGatewayAuthentication(BaseAuthentication):\n", "issue": "[\u65e5\u5fd7] auth fail\u7684\u65e5\u5fd7\u6ca1\u6709\u533a\u5206\u5ea6\n\u5927\u91cf\u7684\u76f8\u540c\u7684\u9519\u8bef\u4fe1\u606f, \u65e0\u6cd5\u5feb\u901f\u7cbe\u786e\u7b54\u590d`\u8c03\u7528\u65b9`\u7684\u7591\u60d1\r\n\r\n```\r\nmsg = \"Invalid token header. No credentials provided.\"\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nTencentBlueKing is pleased to support the open source community by making \u84dd\u9cb8\u667a\u4e91-\u7528\u6237\u7ba1\u7406(Bk-User) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\nimport base64\nimport logging\nimport re\n\nimport jwt\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.cache import caches\nfrom rest_framework import exceptions\nfrom rest_framework.authentication import BaseAuthentication, get_authorization_header\n\nfrom bkuser_core.esb_sdk.shortcuts import get_client_by_raw_username\n\nlogger = logging.getLogger(__name__)\n\n\nHEADER_JWT_KEY_NAME = \"HTTP_X_BKAPI_JWT\"\nHEADER_APP_CODE_KEY_NAME = \"HTTP_X_BK_APP_CODE\"\nHEADER_APP_SECRET_KEY_NAME = \"HTTP_X_BK_APP_SECRET\"\n\nESB_PUBLIC_KEY_CACHE_KEY = \"bk_user:esb_public_key\"\n\nTOKEN_KEY_NAME = \"token\"\n\n\ndef create_user(username=\"admin\"):\n return get_user_model()(username=username, is_staff=True, is_superuser=True)\n\n\nclass InternalTokenAuthentication(BaseAuthentication):\n\n keyword = \"iBearer\"\n model = None\n\n query_params_keyword = TOKEN_KEY_NAME\n\n def get_token_from_query_params(self, request):\n try:\n return request.query_params[self.query_params_keyword]\n except KeyError:\n msg = \"Invalid token header. No credentials provided.\"\n raise exceptions.AuthenticationFailed(msg)\n\n def get_token_from_header(self, request):\n auth = get_authorization_header(request).split()\n\n if not auth or auth[0].lower() != self.keyword.lower().encode():\n msg = \"Invalid token header. No credentials provided.\"\n raise exceptions.AuthenticationFailed(msg)\n\n if len(auth) == 1:\n msg = \"Invalid token header. No credentials provided.\"\n raise exceptions.AuthenticationFailed(msg)\n elif len(auth) > 2:\n msg = \"Invalid token header. Token string should not contain spaces.\"\n raise exceptions.AuthenticationFailed(msg)\n\n try:\n token = auth[1].decode()\n except UnicodeError:\n msg = \"Invalid token header. Token string should not contain invalid characters.\"\n raise exceptions.AuthenticationFailed(msg)\n\n return token\n\n def authenticate(self, request):\n for white_url in settings.AUTH_EXEMPT_PATHS:\n if re.search(white_url, request.path):\n logger.info(\"%s path in white_url<%s>, exempting auth\", request.path, white_url)\n return None, None\n\n try:\n token = self.get_token_from_query_params(request)\n except exceptions.AuthenticationFailed:\n logger.debug(\"no token from query params, trying to get from header instead\")\n token = self.get_token_from_header(request)\n\n return self.authenticate_credentials(token)\n\n def authenticate_credentials(self, key):\n \"\"\"Use access token to identify user\"\"\"\n if key in settings.INTERNAL_AUTH_TOKENS:\n user_info = settings.INTERNAL_AUTH_TOKENS[key]\n return create_user(user_info[\"username\"]), None\n raise exceptions.AuthenticationFailed(\"request failed: Invalid token header. No credentials provided.\")\n\n\nclass ESBOrAPIGatewayAuthentication(BaseAuthentication):\n def authenticate(self, request):\n # get jwt from header\n jwt_content = request.META.get(HEADER_JWT_KEY_NAME, \"\")\n if not jwt_content:\n return None, None\n\n # get the public key\n jwt_header = jwt.get_unverified_header(jwt_content)\n api_name = jwt_header.get(\"kid\") or \"\"\n public_key = self._get_public_key(api_name)\n algorithm = jwt_header.get(\"alg\") or \"RS512\"\n\n # do decode, without verify issuer\n try:\n jwt_playload = jwt.decode(jwt_content, public_key, algorithm)\n except Exception: # pylint: disable=broad-except\n logger.exception(\"JWT decode failed! jwt_payload: %s, public_key: %s\", jwt_content, public_key)\n raise exceptions.AuthenticationFailed(\"decode jwt token fail\")\n\n # username = self._get_username_from_jwt_payload(payload)\n app_code = self._get_app_code_from_jwt_payload(jwt_playload)\n request.bk_app_code = app_code\n\n username = \"APIGW\" if api_name == settings.BK_APIGW_NAME else \"ESB\"\n return create_user(username), None\n\n def _get_public_key(self, api_name):\n # it's from apigateway\n if api_name == settings.BK_APIGW_NAME:\n return self._get_apigw_public_key()\n # it's from esb\n else:\n return self._get_esb_public_key()\n\n def _get_apigw_public_key(self):\n \"\"\"\n \u83b7\u53d6APIGW\u7684PUBLIC KEY\n \u7531\u4e8e\u914d\u7f6e\u6587\u4ef6\u91cc\u7684public key \u662f\u6765\u7740\u73af\u5883\u53d8\u91cf\uff0c\u4e14\u4f7f\u7528\u4e86base64\u7f16\u7801\u7684\uff0c\u6240\u4ee5\u9700\u8981\u83b7\u53d6\u540e\u89e3\u7801\n \"\"\"\n # \u5982\u679cBK_APIGW_PUBLIC_KEY\u4e3a\u7a7a\uff0c\u5219\u76f4\u63a5\u62a5\u9519\n if not settings.BK_APIGW_PUBLIC_KEY:\n raise exceptions.AuthenticationFailed(\"BK_APIGW_PUBLIC_KEY can not be empty\")\n\n # base64\u89e3\u7801\n try:\n public_key = base64.b64decode(settings.BK_APIGW_PUBLIC_KEY).decode(\"utf-8\")\n except Exception: # pylint: disable=broad-except\n logger.exception(\n \"BK_APIGW_PUBLIC_KEY is not valid base64 string! public_key=%s\", settings.BK_APIGW_PUBLIC_KEY\n )\n raise exceptions.AuthenticationFailed(\"BK_APIGW_PUBLIC_KEY is not valid base64 string!\")\n\n return public_key\n\n def _get_esb_public_key(self):\n cache = caches[\"locmem\"]\n public_key = cache.get(ESB_PUBLIC_KEY_CACHE_KEY)\n if public_key is not None:\n return public_key\n\n # get from esb\n client = get_client_by_raw_username(\"admin\")\n try:\n ret = client.esb.get_public_key()\n except Exception: # pylint: disable=broad-except\n logger.exception(\"Get ESB Public Key failed!\")\n raise exceptions.AuthenticationFailed(\"Get ESB Public Key failed!\")\n\n if not ret.get(\"result\", False):\n msg = ret.get(\"message\", \"unknown error\")\n logger.error(\"Get ESB Public Key failed! %s\", msg)\n raise exceptions.AuthenticationFailed(f\"Get ESB Public Key failed! {msg}\")\n\n public_key = ret.get(\"data\", {}).get(\"public_key\", \"\")\n if public_key is None:\n logger.error(\"Get ESB Public Key failed! public_key is empty, ret=%s\", ret)\n raise exceptions.AuthenticationFailed(\"Get ESB Public Key failed! the public key is empty\")\n\n cache.set(ESB_PUBLIC_KEY_CACHE_KEY, public_key, 60 * 60)\n return public_key\n\n def _get_app_code_from_jwt_payload(self, jwt_payload):\n \"\"\"\u4ecejwt\u91cc\u83b7\u53d6app_code\"\"\"\n app = jwt_payload.get(\"app\", {})\n\n verified = app.get(\"verified\", False)\n if not verified:\n raise exceptions.AuthenticationFailed(\"app is not verified\")\n\n app_code = app.get(\"bk_app_code\", \"\") or app.get(\"app_code\", \"\")\n # \u867d\u7136app_code\u4e3a\u7a7a\u5bf9\u4e8e\u540e\u7eed\u7684\u9274\u6743\u4e00\u5b9a\u662f\u4e0d\u901a\u8fc7\u7684\uff0c\u4f46\u9274\u6743\u4e0d\u901a\u8fc7\u6709\u5f88\u591a\u539f\u56e0\uff0c\u8fd9\u91cc\u63d0\u524dlog\u4fbf\u4e8e\u95ee\u9898\u6392\u67e5\n if not app_code:\n raise exceptions.AuthenticationFailed(\"could not get app_code from esb/apigateway jwt payload! it's empty\")\n\n return app_code\n\n\nclass AppCodeAppSecretAuthentication(BaseAuthentication):\n \"\"\"\n \u901a\u8fc7app_code\u548capp_secret\u8fdb\u884c\u9274\u6743\n \"\"\"\n\n def authenticate(self, request):\n # get app_code and app_secret from header\n app_code = request.META.get(HEADER_APP_CODE_KEY_NAME, \"\")\n app_secret = request.META.get(HEADER_APP_SECRET_KEY_NAME, \"\")\n\n if app_code == settings.APP_ID and app_secret == settings.APP_TOKEN:\n return create_user(\"SAAS\"), None\n\n return None, None\n\n\nclass MultipleAuthentication(BaseAuthentication):\n \"\"\"it's a dispatcher\"\"\"\n\n def authenticate(self, request):\n # FIXME: \u6700\u7ec8, \u4e0b\u6389token, \u53ea\u4fdd\u7559 jwt + app_code/app_secret\n # withe list\n for white_url in settings.AUTH_EXEMPT_PATHS:\n if re.search(white_url, request.path):\n logger.debug(\"%s path in white_url<%s>, exempting auth\", request.path, white_url)\n return None, None\n\n # app_code and app_secret\n if HEADER_APP_CODE_KEY_NAME in request.META and HEADER_APP_SECRET_KEY_NAME in request.META:\n return AppCodeAppSecretAuthentication().authenticate(request)\n\n # FIXME: should remove this totally\n # NOTE: some case we want to use token as credentials, call through APIGateway(set default headers)\n # so we should verify others first, not jwt\n if get_authorization_header(request) or request.query_params.get(TOKEN_KEY_NAME):\n # token\n return InternalTokenAuthentication().authenticate(request)\n\n # jwt\n if HEADER_JWT_KEY_NAME in request.META:\n return ESBOrAPIGatewayAuthentication().authenticate(request)\n\n raise exceptions.AuthenticationFailed(\n \"no valid authentication credentials provided! should call through APIGateway/ESB\"\n )\n", "path": "src/api/bkuser_core/enhanced_account/authentication.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nTencentBlueKing is pleased to support the open source community by making \u84dd\u9cb8\u667a\u4e91-\u7528\u6237\u7ba1\u7406(Bk-User) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\nimport base64\nimport logging\nimport re\n\nimport jwt\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.cache import caches\nfrom rest_framework import exceptions\nfrom rest_framework.authentication import BaseAuthentication, get_authorization_header\n\nfrom bkuser_core.esb_sdk.shortcuts import get_client_by_raw_username\n\nlogger = logging.getLogger(__name__)\n\n\nHEADER_JWT_KEY_NAME = \"HTTP_X_BKAPI_JWT\"\nHEADER_APP_CODE_KEY_NAME = \"HTTP_X_BK_APP_CODE\"\nHEADER_APP_SECRET_KEY_NAME = \"HTTP_X_BK_APP_SECRET\"\n\nESB_PUBLIC_KEY_CACHE_KEY = \"bk_user:esb_public_key\"\n\nTOKEN_KEY_NAME = \"token\"\n\n\ndef create_user(username=\"admin\"):\n return get_user_model()(username=username, is_staff=True, is_superuser=True)\n\n\nclass InternalTokenAuthentication(BaseAuthentication):\n\n keyword = \"iBearer\"\n model = None\n\n query_params_keyword = TOKEN_KEY_NAME\n\n def get_token_from_query_params(self, request):\n try:\n return request.query_params[self.query_params_keyword]\n except KeyError:\n msg = f\"Invalid token header. No credentials provided. {self.query_params_keyword} is not in query params\"\n raise exceptions.AuthenticationFailed(msg)\n\n def get_token_from_header(self, request):\n auth = get_authorization_header(request).split()\n\n if not auth or auth[0].lower() != self.keyword.lower().encode():\n msg = \"Invalid token header. No credentials provided. The format should be `iBearer THE_TOKEN`\"\n raise exceptions.AuthenticationFailed(msg)\n\n if len(auth) == 1:\n msg = \"Invalid token header. No credentials provided. The size of auth array credentials is 0\"\n raise exceptions.AuthenticationFailed(msg)\n elif len(auth) > 2:\n msg = (\n \"Invalid token header. Token string should not contain spaces. \"\n + \"The size of auth array credentials is more than 2\"\n )\n raise exceptions.AuthenticationFailed(msg)\n\n try:\n token = auth[1].decode()\n except UnicodeError:\n msg = \"Invalid token header. Token string should not contain invalid characters.\"\n raise exceptions.AuthenticationFailed(msg)\n\n return token\n\n def authenticate(self, request):\n try:\n token = self.get_token_from_query_params(request)\n except exceptions.AuthenticationFailed:\n logger.debug(\"no token from query params, trying to get from header instead\")\n token = self.get_token_from_header(request)\n\n return self.authenticate_credentials(token)\n\n def authenticate_credentials(self, key):\n \"\"\"Use access token to identify user\"\"\"\n if key in settings.INTERNAL_AUTH_TOKENS:\n user_info = settings.INTERNAL_AUTH_TOKENS[key]\n return create_user(user_info[\"username\"]), None\n raise exceptions.AuthenticationFailed(\n \"request failed: Invalid token header. No credentials provided or Wrong credentials.\"\n )\n\n\nclass ESBOrAPIGatewayAuthentication(BaseAuthentication):\n def authenticate(self, request):\n # get jwt from header\n jwt_content = request.META.get(HEADER_JWT_KEY_NAME, \"\")\n if not jwt_content:\n return None, None\n\n # get the public key\n jwt_header = jwt.get_unverified_header(jwt_content)\n api_name = jwt_header.get(\"kid\") or \"\"\n public_key = self._get_public_key(api_name)\n algorithm = jwt_header.get(\"alg\") or \"RS512\"\n\n # do decode, without verify issuer\n try:\n jwt_playload = jwt.decode(jwt_content, public_key, algorithm)\n except Exception: # pylint: disable=broad-except\n logger.exception(\"JWT decode failed! jwt_payload: %s, public_key: %s\", jwt_content, public_key)\n raise exceptions.AuthenticationFailed(\"decode jwt token fail\")\n\n # username = self._get_username_from_jwt_payload(payload)\n app_code = self._get_app_code_from_jwt_payload(jwt_playload)\n request.bk_app_code = app_code\n\n username = \"APIGW\" if api_name == settings.BK_APIGW_NAME else \"ESB\"\n return create_user(username), None\n\n def _get_public_key(self, api_name):\n # it's from apigateway\n if api_name == settings.BK_APIGW_NAME:\n return self._get_apigw_public_key()\n # it's from esb\n else:\n return self._get_esb_public_key()\n\n def _get_apigw_public_key(self):\n \"\"\"\n \u83b7\u53d6APIGW\u7684PUBLIC KEY\n \u7531\u4e8e\u914d\u7f6e\u6587\u4ef6\u91cc\u7684public key \u662f\u6765\u7740\u73af\u5883\u53d8\u91cf\uff0c\u4e14\u4f7f\u7528\u4e86base64\u7f16\u7801\u7684\uff0c\u6240\u4ee5\u9700\u8981\u83b7\u53d6\u540e\u89e3\u7801\n \"\"\"\n # \u5982\u679cBK_APIGW_PUBLIC_KEY\u4e3a\u7a7a\uff0c\u5219\u76f4\u63a5\u62a5\u9519\n if not settings.BK_APIGW_PUBLIC_KEY:\n raise exceptions.AuthenticationFailed(\"BK_APIGW_PUBLIC_KEY can not be empty\")\n\n # base64\u89e3\u7801\n try:\n public_key = base64.b64decode(settings.BK_APIGW_PUBLIC_KEY).decode(\"utf-8\")\n except Exception: # pylint: disable=broad-except\n logger.exception(\n \"BK_APIGW_PUBLIC_KEY is not valid base64 string! public_key=%s\", settings.BK_APIGW_PUBLIC_KEY\n )\n raise exceptions.AuthenticationFailed(\"BK_APIGW_PUBLIC_KEY is not valid base64 string!\")\n\n return public_key\n\n def _get_esb_public_key(self):\n cache = caches[\"locmem\"]\n public_key = cache.get(ESB_PUBLIC_KEY_CACHE_KEY)\n if public_key is not None:\n return public_key\n\n # get from esb\n client = get_client_by_raw_username(\"admin\")\n try:\n ret = client.esb.get_public_key()\n except Exception: # pylint: disable=broad-except\n logger.exception(\"Get ESB Public Key failed!\")\n raise exceptions.AuthenticationFailed(\"Get ESB Public Key failed!\")\n\n if not ret.get(\"result\", False):\n msg = ret.get(\"message\", \"unknown error\")\n logger.error(\"Get ESB Public Key failed! %s\", msg)\n raise exceptions.AuthenticationFailed(f\"Get ESB Public Key failed! {msg}\")\n\n public_key = ret.get(\"data\", {}).get(\"public_key\", \"\")\n if public_key is None:\n logger.error(\"Get ESB Public Key failed! public_key is empty, ret=%s\", ret)\n raise exceptions.AuthenticationFailed(\"Get ESB Public Key failed! the public key is empty\")\n\n cache.set(ESB_PUBLIC_KEY_CACHE_KEY, public_key, 60 * 60)\n return public_key\n\n def _get_app_code_from_jwt_payload(self, jwt_payload):\n \"\"\"\u4ecejwt\u91cc\u83b7\u53d6app_code\"\"\"\n app = jwt_payload.get(\"app\", {})\n\n verified = app.get(\"verified\", False)\n if not verified:\n raise exceptions.AuthenticationFailed(\"app is not verified\")\n\n app_code = app.get(\"bk_app_code\", \"\") or app.get(\"app_code\", \"\")\n # \u867d\u7136app_code\u4e3a\u7a7a\u5bf9\u4e8e\u540e\u7eed\u7684\u9274\u6743\u4e00\u5b9a\u662f\u4e0d\u901a\u8fc7\u7684\uff0c\u4f46\u9274\u6743\u4e0d\u901a\u8fc7\u6709\u5f88\u591a\u539f\u56e0\uff0c\u8fd9\u91cc\u63d0\u524dlog\u4fbf\u4e8e\u95ee\u9898\u6392\u67e5\n if not app_code:\n raise exceptions.AuthenticationFailed(\"could not get app_code from esb/apigateway jwt payload! it's empty\")\n\n return app_code\n\n\nclass AppCodeAppSecretAuthentication(BaseAuthentication):\n \"\"\"\n \u901a\u8fc7app_code\u548capp_secret\u8fdb\u884c\u9274\u6743\n \"\"\"\n\n def authenticate(self, request):\n # get app_code and app_secret from header\n app_code = request.META.get(HEADER_APP_CODE_KEY_NAME, \"\")\n app_secret = request.META.get(HEADER_APP_SECRET_KEY_NAME, \"\")\n\n if app_code == settings.APP_ID and app_secret == settings.APP_TOKEN:\n return create_user(\"SAAS\"), None\n\n return None, None\n\n\nclass MultipleAuthentication(BaseAuthentication):\n \"\"\"it's a dispatcher\"\"\"\n\n def authenticate(self, request):\n # FIXME: \u6700\u7ec8, \u4e0b\u6389token, \u53ea\u4fdd\u7559 jwt + app_code/app_secret\n # withe list\n for white_url in settings.AUTH_EXEMPT_PATHS:\n if re.search(white_url, request.path):\n logger.debug(\"%s path in white_url<%s>, exempting auth\", request.path, white_url)\n return None, None\n\n # app_code and app_secret\n if HEADER_APP_CODE_KEY_NAME in request.META and HEADER_APP_SECRET_KEY_NAME in request.META:\n return AppCodeAppSecretAuthentication().authenticate(request)\n\n # FIXME: should remove this totally\n # NOTE: some case we want to use token as credentials, call through APIGateway(set default headers)\n # so we should verify others first, not jwt\n if get_authorization_header(request) or request.query_params.get(TOKEN_KEY_NAME):\n # token\n return InternalTokenAuthentication().authenticate(request)\n\n # jwt\n if HEADER_JWT_KEY_NAME in request.META:\n return ESBOrAPIGatewayAuthentication().authenticate(request)\n\n raise exceptions.AuthenticationFailed(\n \"no valid authentication credentials provided! should call through APIGateway/ESB\"\n )\n", "path": "src/api/bkuser_core/enhanced_account/authentication.py"}]}
3,036
552
gh_patches_debug_40997
rasdani/github-patches
git_diff
openmc-dev__openmc-542
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Hosted develop branch documentation I think we should host the documentation for the develop branch somewhere. Bonus points if it automatically updates every time develop changes (could be a nightly build type thing). Extra bonus points if it also displays the documentation for open PRs. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/sphinxext/notebook_sphinxext.py` Content: ``` 1 import sys 2 import os.path 3 import re 4 import time 5 from docutils import io, nodes, statemachine, utils 6 try: 7 from docutils.utils.error_reporting import ErrorString # the new way 8 except ImportError: 9 from docutils.error_reporting import ErrorString # the old way 10 from docutils.parsers.rst import Directive, convert_directive_function 11 from docutils.parsers.rst import directives, roles, states 12 from docutils.parsers.rst.roles import set_classes 13 from docutils.transforms import misc 14 15 try: 16 from IPython.nbconver.exporters import html 17 except ImportError: 18 from IPython.nbconvert import html 19 20 21 class Notebook(Directive): 22 """Use nbconvert to insert a notebook into the environment. 23 This is based on the Raw directive in docutils 24 """ 25 required_arguments = 1 26 optional_arguments = 0 27 final_argument_whitespace = True 28 option_spec = {} 29 has_content = False 30 31 def run(self): 32 # check if raw html is supported 33 if not self.state.document.settings.raw_enabled: 34 raise self.warning('"%s" directive disabled.' % self.name) 35 36 # set up encoding 37 attributes = {'format': 'html'} 38 encoding = self.options.get( 39 'encoding', self.state.document.settings.input_encoding) 40 e_handler = self.state.document.settings.input_encoding_error_handler 41 42 # get path to notebook 43 source_dir = os.path.dirname( 44 os.path.abspath(self.state.document.current_source)) 45 nb_path = os.path.normpath(os.path.join(source_dir, 46 self.arguments[0])) 47 nb_path = utils.relative_path(None, nb_path) 48 49 # convert notebook to html 50 exporter = html.HTMLExporter(template_file='full') 51 output, resources = exporter.from_filename(nb_path) 52 header = output.split('<head>', 1)[1].split('</head>',1)[0] 53 body = output.split('<body>', 1)[1].split('</body>',1)[0] 54 55 # add HTML5 scoped attribute to header style tags 56 header = header.replace('<style', '<style scoped="scoped"') 57 header = header.replace('body {\n overflow: visible;\n padding: 8px;\n}\n', 58 '') 59 header = header.replace("code,pre{", "code{") 60 61 # Filter out styles that conflict with the sphinx theme. 62 filter_strings = [ 63 'navbar', 64 'body{', 65 'alert{', 66 'uneditable-input{', 67 'collapse{', 68 ] 69 70 filter_strings.extend(['h%s{' % (i+1) for i in range(6)]) 71 72 line_begin = [ 73 'pre{', 74 'p{margin' 75 ] 76 77 filterfunc = lambda x: not any([s in x for s in filter_strings]) 78 header_lines = filter(filterfunc, header.split('\n')) 79 80 filterfunc = lambda x: not any([x.startswith(s) for s in line_begin]) 81 header_lines = filter(filterfunc, header_lines) 82 83 header = '\n'.join(header_lines) 84 85 # concatenate raw html lines 86 lines = ['<div class="ipynotebook">'] 87 lines.append(header) 88 lines.append(body) 89 lines.append('</div>') 90 text = '\n'.join(lines) 91 92 # add dependency 93 self.state.document.settings.record_dependencies.add(nb_path) 94 attributes['source'] = nb_path 95 96 # create notebook node 97 nb_node = notebook('', text, **attributes) 98 (nb_node.source, nb_node.line) = \ 99 self.state_machine.get_source_and_line(self.lineno) 100 101 return [nb_node] 102 103 104 class notebook(nodes.raw): 105 pass 106 107 108 def visit_notebook_node(self, node): 109 self.visit_raw(node) 110 111 112 def depart_notebook_node(self, node): 113 self.depart_raw(node) 114 115 116 def setup(app): 117 app.add_node(notebook, 118 html=(visit_notebook_node, depart_notebook_node)) 119 120 app.add_directive('notebook', Notebook) 121 ``` Path: `docs/source/conf.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # metasci documentation build configuration file, created by 4 # sphinx-quickstart on Sun Feb 7 22:29:49 2010. 5 # 6 # This file is execfile()d with the current directory set to its containing dir. 7 # 8 # Note that not all possible configuration values are present in this 9 # autogenerated file. 10 # 11 # All configuration values have a default; values that are commented out 12 # serve to show the default. 13 14 import sys, os 15 16 # If extensions (or modules to document with autodoc) are in another directory, 17 # add these directories to sys.path here. If the directory is relative to the 18 # documentation root, use os.path.abspath to make it absolute, like shown here. 19 sys.path.insert(0, os.path.abspath('../sphinxext')) 20 sys.path.insert(0, os.path.abspath('../..')) 21 22 23 # -- General configuration ----------------------------------------------------- 24 25 # Add any Sphinx extension module names here, as strings. They can be extensions 26 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 27 extensions = ['sphinx.ext.autodoc', 28 'sphinx.ext.napoleon', 29 'sphinx.ext.pngmath', 30 'sphinx.ext.autosummary', 31 'sphinxcontrib.tikz', 32 'sphinx_numfig', 33 'notebook_sphinxext'] 34 35 # Add any paths that contain templates here, relative to this directory. 36 templates_path = ['_templates'] 37 38 # The suffix of source filenames. 39 source_suffix = '.rst' 40 41 # The encoding of source files. 42 #source_encoding = 'utf-8' 43 44 # The master toctree document. 45 master_doc = 'index' 46 47 # General information about the project. 48 project = u'OpenMC' 49 copyright = u'2011-2015, Massachusetts Institute of Technology' 50 51 # The version info for the project you're documenting, acts as replacement for 52 # |version| and |release|, also used in various other places throughout the 53 # built documents. 54 # 55 # The short X.Y version. 56 version = "0.7" 57 # The full version, including alpha/beta/rc tags. 58 release = "0.7.1" 59 60 # The language for content autogenerated by Sphinx. Refer to documentation 61 # for a list of supported languages. 62 #language = None 63 64 # There are two options for replacing |today|: either, you set today to some 65 # non-false value, then it is used: 66 #today = '' 67 # Else, today_fmt is used as the format for a strftime call. 68 #today_fmt = '%B %d, %Y' 69 70 # List of documents that shouldn't be included in the build. 71 #unused_docs = [] 72 73 # List of directories, relative to source directory, that shouldn't be searched 74 # for source files. 75 exclude_trees = [] 76 77 # The reST default role (used for this markup: `text`) to use for all documents. 78 #default_role = None 79 80 # If true, '()' will be appended to :func: etc. cross-reference text. 81 #add_function_parentheses = True 82 83 # If true, the current module name will be prepended to all description 84 # unit titles (such as .. function::). 85 #add_module_names = True 86 87 # If true, sectionauthor and moduleauthor directives will be shown in the 88 # output. They are ignored by default. 89 #show_authors = False 90 91 # The name of the Pygments (syntax highlighting) style to use. 92 #pygments_style = 'sphinx' 93 #pygments_style = 'friendly' 94 #pygments_style = 'bw' 95 #pygments_style = 'fruity' 96 #pygments_style = 'manni' 97 pygments_style = 'tango' 98 99 100 # A list of ignored prefixes for module index sorting. 101 #modindex_common_prefix = [] 102 103 104 # -- Options for HTML output --------------------------------------------------- 105 106 # The theme to use for HTML and HTML Help pages. Major themes that come with 107 # Sphinx are currently 'default' and 'sphinxdoc'. 108 html_theme = 'haiku' 109 #html_theme = 'altered_nature' 110 #html_theme = 'sphinxdoc' 111 112 # Theme options are theme-specific and customize the look and feel of a theme 113 # further. For a list of options available for each theme, see the 114 # documentation. 115 html_theme_options = {'full_logo': True, 116 'linkcolor': '#0c3762', 117 'visitedlinkcolor': '#0c3762'} 118 119 # Add any paths that contain custom themes here, relative to this directory. 120 #html_theme_path = ["_theme"] 121 122 # The name for this set of Sphinx documents. If None, it defaults to 123 # "<project> v<release> documentation". 124 html_title = "OpenMC Documentation" 125 126 # A shorter title for the navigation bar. Default is the same as html_title. 127 #html_short_title = None 128 129 # The name of an image file (relative to this directory) to place at the top 130 # of the sidebar. 131 html_logo = '_images/openmc.png' 132 133 # The name of an image file (within the static path) to use as favicon of the 134 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 135 # pixels large. 136 #html_favicon = None 137 138 # Add any paths that contain custom static files (such as style sheets) here, 139 # relative to this directory. They are copied after the builtin static files, 140 # so a file named "default.css" will overwrite the builtin "default.css". 141 #html_static_path = ['_static'] 142 143 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 144 # using the given strftime format. 145 #html_last_updated_fmt = '%b %d, %Y' 146 147 # If true, SmartyPants will be used to convert quotes and dashes to 148 # typographically correct entities. 149 #html_use_smartypants = True 150 151 # Custom sidebar templates, maps document names to template names. 152 #html_sidebars = {} 153 154 # Additional templates that should be rendered to pages, maps page names to 155 # template names. 156 #html_additional_pages = {} 157 158 # If false, no module index is generated. 159 #html_use_modindex = True 160 161 # If false, no index is generated. 162 #html_use_index = True 163 164 # If true, the index is split into individual pages for each letter. 165 #html_split_index = False 166 167 # If true, links to the reST sources are added to the pages. 168 #html_show_sourcelink = True 169 170 # If true, an OpenSearch description file will be output, and all pages will 171 # contain a <link> tag referring to it. The value of this option must be the 172 # base URL from which the finished HTML is served. 173 #html_use_opensearch = '' 174 175 # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). 176 #html_file_suffix = '' 177 178 # Output file base name for HTML help builder. 179 htmlhelp_basename = 'openmcdoc' 180 181 182 183 # -- Options for LaTeX output -------------------------------------------------- 184 185 # The paper size ('letter' or 'a4'). 186 #latex_paper_size = 'letter' 187 188 # The font size ('10pt', '11pt' or '12pt'). 189 #latex_font_size = '10pt' 190 191 # Grouping the document tree into LaTeX files. List of tuples 192 # (source start file, target name, title, author, documentclass [howto/manual]). 193 latex_documents = [ 194 ('index', 'openmc.tex', u'OpenMC Documentation', 195 u'Massachusetts Institute of Technology', 'manual'), 196 ] 197 198 latex_elements = { 199 'preamble': r""" 200 \usepackage{enumitem} 201 \usepackage{amsfonts} 202 \usepackage{amsmath} 203 \setlistdepth{99} 204 \usepackage{tikz} 205 \usetikzlibrary{shapes,snakes,shadows,arrows,calc,decorations.markings,patterns,fit,matrix,spy} 206 \usepackage{fixltx2e} 207 \hypersetup{bookmarksdepth=3} 208 \setcounter{tocdepth}{2} 209 \numberwithin{equation}{section} 210 """, 211 'printindex': r"" 212 } 213 214 # The name of an image file (relative to this directory) to place at the top of 215 # the title page. 216 #latex_logo = None 217 218 # For "manual" documents, if this is true, then toplevel headings are parts, 219 # not chapters. 220 #latex_use_parts = False 221 222 # Additional stuff for the LaTeX preamble. 223 #latex_preamble = '' 224 225 # Documents to append as an appendix to all manuals. 226 #latex_appendices = [] 227 228 # If false, no module index is generated. 229 #latex_use_modindex = True 230 231 #Autodocumentation Flags 232 #autodoc_member_order = "groupwise" 233 #autoclass_content = "both" 234 #autosummary_generate = [] 235 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/source/conf.py b/docs/source/conf.py --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -13,6 +13,26 @@ import sys, os +# Determine if we're on Read the Docs server +on_rtd = os.environ.get('READTHEDOCS', None) == 'True' + +# On Read the Docs, we need to mock a few third-party modules so we don't get +# ImportErrors when building documentation +try: + from unittest.mock import MagicMock +except ImportError: + from mock import Mock as MagicMock + + +class Mock(MagicMock): + @classmethod + def __getattr__(cls, name): + return Mock() + +MOCK_MODULES = ['numpy', 'h5py', 'pandas', 'opencg'] +sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES) + + # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. @@ -26,9 +46,8 @@ # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', - 'sphinx.ext.pngmath', + 'sphinx.ext.mathjax', 'sphinx.ext.autosummary', - 'sphinxcontrib.tikz', 'sphinx_numfig', 'notebook_sphinxext'] @@ -105,16 +124,15 @@ # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. -html_theme = 'haiku' -#html_theme = 'altered_nature' -#html_theme = 'sphinxdoc' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = {'full_logo': True, - 'linkcolor': '#0c3762', - 'visitedlinkcolor': '#0c3762'} +if on_rtd: + html_theme = 'default' + html_logo = '_images/openmc200px.png' +else: + html_theme = 'haiku' + html_theme_options = {'full_logo': True, + 'linkcolor': '#0c3762', + 'visitedlinkcolor': '#0c3762'} + html_logo = '_images/openmc.png' # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = ["_theme"] @@ -126,10 +144,6 @@ # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -html_logo = '_images/openmc.png' - # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. diff --git a/docs/sphinxext/notebook_sphinxext.py b/docs/sphinxext/notebook_sphinxext.py --- a/docs/sphinxext/notebook_sphinxext.py +++ b/docs/sphinxext/notebook_sphinxext.py @@ -12,10 +12,7 @@ from docutils.parsers.rst.roles import set_classes from docutils.transforms import misc -try: - from IPython.nbconver.exporters import html -except ImportError: - from IPython.nbconvert import html +from nbconvert import html class Notebook(Directive):
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -13,6 +13,26 @@\n \n import sys, os\n \n+# Determine if we're on Read the Docs server\n+on_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n+\n+# On Read the Docs, we need to mock a few third-party modules so we don't get\n+# ImportErrors when building documentation\n+try:\n+ from unittest.mock import MagicMock\n+except ImportError:\n+ from mock import Mock as MagicMock\n+\n+\n+class Mock(MagicMock):\n+ @classmethod\n+ def __getattr__(cls, name):\n+ return Mock()\n+\n+MOCK_MODULES = ['numpy', 'h5py', 'pandas', 'opencg']\n+sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)\n+\n+\n # If extensions (or modules to document with autodoc) are in another directory,\n # add these directories to sys.path here. If the directory is relative to the\n # documentation root, use os.path.abspath to make it absolute, like shown here.\n@@ -26,9 +46,8 @@\n # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\n extensions = ['sphinx.ext.autodoc',\n 'sphinx.ext.napoleon',\n- 'sphinx.ext.pngmath',\n+ 'sphinx.ext.mathjax',\n 'sphinx.ext.autosummary',\n- 'sphinxcontrib.tikz',\n 'sphinx_numfig',\n 'notebook_sphinxext']\n \n@@ -105,16 +124,15 @@\n \n # The theme to use for HTML and HTML Help pages. Major themes that come with\n # Sphinx are currently 'default' and 'sphinxdoc'.\n-html_theme = 'haiku'\n-#html_theme = 'altered_nature'\n-#html_theme = 'sphinxdoc'\n-\n-# Theme options are theme-specific and customize the look and feel of a theme\n-# further. For a list of options available for each theme, see the\n-# documentation.\n-html_theme_options = {'full_logo': True,\n- 'linkcolor': '#0c3762',\n- 'visitedlinkcolor': '#0c3762'}\n+if on_rtd:\n+ html_theme = 'default'\n+ html_logo = '_images/openmc200px.png'\n+else:\n+ html_theme = 'haiku'\n+ html_theme_options = {'full_logo': True,\n+ 'linkcolor': '#0c3762',\n+ 'visitedlinkcolor': '#0c3762'}\n+ html_logo = '_images/openmc.png'\n \n # Add any paths that contain custom themes here, relative to this directory.\n #html_theme_path = [\"_theme\"]\n@@ -126,10 +144,6 @@\n # A shorter title for the navigation bar. Default is the same as html_title.\n #html_short_title = None\n \n-# The name of an image file (relative to this directory) to place at the top\n-# of the sidebar.\n-html_logo = '_images/openmc.png'\n-\n # The name of an image file (within the static path) to use as favicon of the\n # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n # pixels large.\ndiff --git a/docs/sphinxext/notebook_sphinxext.py b/docs/sphinxext/notebook_sphinxext.py\n--- a/docs/sphinxext/notebook_sphinxext.py\n+++ b/docs/sphinxext/notebook_sphinxext.py\n@@ -12,10 +12,7 @@\n from docutils.parsers.rst.roles import set_classes\n from docutils.transforms import misc\n \n-try:\n- from IPython.nbconver.exporters import html\n-except ImportError:\n- from IPython.nbconvert import html\n+from nbconvert import html\n \n \n class Notebook(Directive):\n", "issue": "Hosted develop branch documentation\nI think we should host the documentation for the develop branch somewhere. Bonus points if it automatically updates every time develop changes (could be a nightly build type thing). Extra bonus points if it also displays the documentation for open PRs.\n\n", "before_files": [{"content": "import sys\nimport os.path\nimport re\nimport time\nfrom docutils import io, nodes, statemachine, utils\ntry:\n from docutils.utils.error_reporting import ErrorString # the new way\nexcept ImportError:\n from docutils.error_reporting import ErrorString # the old way\nfrom docutils.parsers.rst import Directive, convert_directive_function\nfrom docutils.parsers.rst import directives, roles, states\nfrom docutils.parsers.rst.roles import set_classes\nfrom docutils.transforms import misc\n\ntry:\n from IPython.nbconver.exporters import html\nexcept ImportError:\n from IPython.nbconvert import html\n\n\nclass Notebook(Directive):\n \"\"\"Use nbconvert to insert a notebook into the environment.\n This is based on the Raw directive in docutils\n \"\"\"\n required_arguments = 1\n optional_arguments = 0\n final_argument_whitespace = True\n option_spec = {}\n has_content = False\n\n def run(self):\n # check if raw html is supported\n if not self.state.document.settings.raw_enabled:\n raise self.warning('\"%s\" directive disabled.' % self.name)\n\n # set up encoding\n attributes = {'format': 'html'}\n encoding = self.options.get(\n 'encoding', self.state.document.settings.input_encoding)\n e_handler = self.state.document.settings.input_encoding_error_handler\n\n # get path to notebook\n source_dir = os.path.dirname(\n os.path.abspath(self.state.document.current_source))\n nb_path = os.path.normpath(os.path.join(source_dir,\n self.arguments[0]))\n nb_path = utils.relative_path(None, nb_path)\n\n # convert notebook to html\n exporter = html.HTMLExporter(template_file='full')\n output, resources = exporter.from_filename(nb_path)\n header = output.split('<head>', 1)[1].split('</head>',1)[0]\n body = output.split('<body>', 1)[1].split('</body>',1)[0]\n\n # add HTML5 scoped attribute to header style tags\n header = header.replace('<style', '<style scoped=\"scoped\"')\n header = header.replace('body {\\n overflow: visible;\\n padding: 8px;\\n}\\n',\n '')\n header = header.replace(\"code,pre{\", \"code{\")\n\n # Filter out styles that conflict with the sphinx theme.\n filter_strings = [\n 'navbar',\n 'body{',\n 'alert{',\n 'uneditable-input{',\n 'collapse{',\n ]\n\n filter_strings.extend(['h%s{' % (i+1) for i in range(6)])\n\n line_begin = [\n 'pre{',\n 'p{margin'\n ]\n\n filterfunc = lambda x: not any([s in x for s in filter_strings])\n header_lines = filter(filterfunc, header.split('\\n'))\n\n filterfunc = lambda x: not any([x.startswith(s) for s in line_begin])\n header_lines = filter(filterfunc, header_lines)\n\n header = '\\n'.join(header_lines)\n\n # concatenate raw html lines\n lines = ['<div class=\"ipynotebook\">']\n lines.append(header)\n lines.append(body)\n lines.append('</div>')\n text = '\\n'.join(lines)\n\n # add dependency\n self.state.document.settings.record_dependencies.add(nb_path)\n attributes['source'] = nb_path\n\n # create notebook node\n nb_node = notebook('', text, **attributes)\n (nb_node.source, nb_node.line) = \\\n self.state_machine.get_source_and_line(self.lineno)\n\n return [nb_node]\n\n\nclass notebook(nodes.raw):\n pass\n\n\ndef visit_notebook_node(self, node):\n self.visit_raw(node)\n\n\ndef depart_notebook_node(self, node):\n self.depart_raw(node)\n\n\ndef setup(app):\n app.add_node(notebook,\n html=(visit_notebook_node, depart_notebook_node))\n\n app.add_directive('notebook', Notebook)\n", "path": "docs/sphinxext/notebook_sphinxext.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# metasci documentation build configuration file, created by\n# sphinx-quickstart on Sun Feb 7 22:29:49 2010.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys, os\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('../sphinxext'))\nsys.path.insert(0, os.path.abspath('../..'))\n\n\n# -- General configuration -----------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = ['sphinx.ext.autodoc',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.pngmath',\n 'sphinx.ext.autosummary',\n 'sphinxcontrib.tikz',\n 'sphinx_numfig',\n 'notebook_sphinxext']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'OpenMC'\ncopyright = u'2011-2015, Massachusetts Institute of Technology'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \"0.7\"\n# The full version, including alpha/beta/rc tags.\nrelease = \"0.7.1\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n#unused_docs = []\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = []\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\n#pygments_style = 'sphinx'\n#pygments_style = 'friendly'\n#pygments_style = 'bw'\n#pygments_style = 'fruity'\n#pygments_style = 'manni'\npygments_style = 'tango'\n\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. Major themes that come with\n# Sphinx are currently 'default' and 'sphinxdoc'.\nhtml_theme = 'haiku'\n#html_theme = 'altered_nature'\n#html_theme = 'sphinxdoc'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {'full_logo': True,\n 'linkcolor': '#0c3762',\n 'visitedlinkcolor': '#0c3762'}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = [\"_theme\"]\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\nhtml_title = \"OpenMC Documentation\"\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = '_images/openmc.png'\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n#html_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_use_modindex = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'openmcdoc'\n\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\n# The paper size ('letter' or 'a4').\n#latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'openmc.tex', u'OpenMC Documentation',\n u'Massachusetts Institute of Technology', 'manual'),\n]\n\nlatex_elements = {\n'preamble': r\"\"\"\n\\usepackage{enumitem}\n\\usepackage{amsfonts}\n\\usepackage{amsmath}\n\\setlistdepth{99}\n\\usepackage{tikz}\n\\usetikzlibrary{shapes,snakes,shadows,arrows,calc,decorations.markings,patterns,fit,matrix,spy}\n\\usepackage{fixltx2e}\n\\hypersetup{bookmarksdepth=3}\n\\setcounter{tocdepth}{2}\n\\numberwithin{equation}{section}\n\"\"\",\n'printindex': r\"\"\n}\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n#latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_use_modindex = True\n\n#Autodocumentation Flags\n#autodoc_member_order = \"groupwise\"\n#autoclass_content = \"both\"\n#autosummary_generate = []\n", "path": "docs/source/conf.py"}], "after_files": [{"content": "import sys\nimport os.path\nimport re\nimport time\nfrom docutils import io, nodes, statemachine, utils\ntry:\n from docutils.utils.error_reporting import ErrorString # the new way\nexcept ImportError:\n from docutils.error_reporting import ErrorString # the old way\nfrom docutils.parsers.rst import Directive, convert_directive_function\nfrom docutils.parsers.rst import directives, roles, states\nfrom docutils.parsers.rst.roles import set_classes\nfrom docutils.transforms import misc\n\nfrom nbconvert import html\n\n\nclass Notebook(Directive):\n \"\"\"Use nbconvert to insert a notebook into the environment.\n This is based on the Raw directive in docutils\n \"\"\"\n required_arguments = 1\n optional_arguments = 0\n final_argument_whitespace = True\n option_spec = {}\n has_content = False\n\n def run(self):\n # check if raw html is supported\n if not self.state.document.settings.raw_enabled:\n raise self.warning('\"%s\" directive disabled.' % self.name)\n\n # set up encoding\n attributes = {'format': 'html'}\n encoding = self.options.get(\n 'encoding', self.state.document.settings.input_encoding)\n e_handler = self.state.document.settings.input_encoding_error_handler\n\n # get path to notebook\n source_dir = os.path.dirname(\n os.path.abspath(self.state.document.current_source))\n nb_path = os.path.normpath(os.path.join(source_dir,\n self.arguments[0]))\n nb_path = utils.relative_path(None, nb_path)\n\n # convert notebook to html\n exporter = html.HTMLExporter(template_file='full')\n output, resources = exporter.from_filename(nb_path)\n header = output.split('<head>', 1)[1].split('</head>',1)[0]\n body = output.split('<body>', 1)[1].split('</body>',1)[0]\n\n # add HTML5 scoped attribute to header style tags\n header = header.replace('<style', '<style scoped=\"scoped\"')\n header = header.replace('body {\\n overflow: visible;\\n padding: 8px;\\n}\\n',\n '')\n header = header.replace(\"code,pre{\", \"code{\")\n\n # Filter out styles that conflict with the sphinx theme.\n filter_strings = [\n 'navbar',\n 'body{',\n 'alert{',\n 'uneditable-input{',\n 'collapse{',\n ]\n\n filter_strings.extend(['h%s{' % (i+1) for i in range(6)])\n\n line_begin = [\n 'pre{',\n 'p{margin'\n ]\n\n filterfunc = lambda x: not any([s in x for s in filter_strings])\n header_lines = filter(filterfunc, header.split('\\n'))\n\n filterfunc = lambda x: not any([x.startswith(s) for s in line_begin])\n header_lines = filter(filterfunc, header_lines)\n\n header = '\\n'.join(header_lines)\n\n # concatenate raw html lines\n lines = ['<div class=\"ipynotebook\">']\n lines.append(header)\n lines.append(body)\n lines.append('</div>')\n text = '\\n'.join(lines)\n\n # add dependency\n self.state.document.settings.record_dependencies.add(nb_path)\n attributes['source'] = nb_path\n\n # create notebook node\n nb_node = notebook('', text, **attributes)\n (nb_node.source, nb_node.line) = \\\n self.state_machine.get_source_and_line(self.lineno)\n\n return [nb_node]\n\n\nclass notebook(nodes.raw):\n pass\n\n\ndef visit_notebook_node(self, node):\n self.visit_raw(node)\n\n\ndef depart_notebook_node(self, node):\n self.depart_raw(node)\n\n\ndef setup(app):\n app.add_node(notebook,\n html=(visit_notebook_node, depart_notebook_node))\n\n app.add_directive('notebook', Notebook)\n", "path": "docs/sphinxext/notebook_sphinxext.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# metasci documentation build configuration file, created by\n# sphinx-quickstart on Sun Feb 7 22:29:49 2010.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys, os\n\n# Determine if we're on Read the Docs server\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\n# On Read the Docs, we need to mock a few third-party modules so we don't get\n# ImportErrors when building documentation\ntry:\n from unittest.mock import MagicMock\nexcept ImportError:\n from mock import Mock as MagicMock\n\n\nclass Mock(MagicMock):\n @classmethod\n def __getattr__(cls, name):\n return Mock()\n\nMOCK_MODULES = ['numpy', 'h5py', 'pandas', 'opencg']\nsys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)\n\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('../sphinxext'))\nsys.path.insert(0, os.path.abspath('../..'))\n\n\n# -- General configuration -----------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = ['sphinx.ext.autodoc',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.autosummary',\n 'sphinx_numfig',\n 'notebook_sphinxext']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'OpenMC'\ncopyright = u'2011-2015, Massachusetts Institute of Technology'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \"0.7\"\n# The full version, including alpha/beta/rc tags.\nrelease = \"0.7.1\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n#unused_docs = []\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = []\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\n#pygments_style = 'sphinx'\n#pygments_style = 'friendly'\n#pygments_style = 'bw'\n#pygments_style = 'fruity'\n#pygments_style = 'manni'\npygments_style = 'tango'\n\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. Major themes that come with\n# Sphinx are currently 'default' and 'sphinxdoc'.\nif on_rtd:\n html_theme = 'default'\n html_logo = '_images/openmc200px.png'\nelse:\n html_theme = 'haiku'\n html_theme_options = {'full_logo': True,\n 'linkcolor': '#0c3762',\n 'visitedlinkcolor': '#0c3762'}\n html_logo = '_images/openmc.png'\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = [\"_theme\"]\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\nhtml_title = \"OpenMC Documentation\"\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n#html_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_use_modindex = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'openmcdoc'\n\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\n# The paper size ('letter' or 'a4').\n#latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'openmc.tex', u'OpenMC Documentation',\n u'Massachusetts Institute of Technology', 'manual'),\n]\n\nlatex_elements = {\n'preamble': r\"\"\"\n\\usepackage{enumitem}\n\\usepackage{amsfonts}\n\\usepackage{amsmath}\n\\setlistdepth{99}\n\\usepackage{tikz}\n\\usetikzlibrary{shapes,snakes,shadows,arrows,calc,decorations.markings,patterns,fit,matrix,spy}\n\\usepackage{fixltx2e}\n\\hypersetup{bookmarksdepth=3}\n\\setcounter{tocdepth}{2}\n\\numberwithin{equation}{section}\n\"\"\",\n'printindex': r\"\"\n}\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n#latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_use_modindex = True\n\n#Autodocumentation Flags\n#autodoc_member_order = \"groupwise\"\n#autoclass_content = \"both\"\n#autosummary_generate = []\n", "path": "docs/source/conf.py"}]}
3,970
892
gh_patches_debug_14672
rasdani/github-patches
git_diff
Flexget__Flexget-2212
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Inaccurate exists_series reject message The original issue was solved on forums: https://discuss.flexget.com/t/quality-upgrade-doesnt-working/3642 but I've been asked to add an issue here because of the _inaccurate error message_. ### Expected behaviour: Download better quality <!--- Please don't just say "it doesn't crash" or "it works". Explain what the expected result is. ---> ### Actual behaviour: Rejecting by exists_series plugin because proper already exists (but there's no proper release) #### Config: ``` tv: series: settings: tvhd: quality: 720p+ !h265 !10bit propers: 15 hours upgrade: no specials: no special_ids: e00 set: path: /TV/{{series_name}}/Season {{series_season}} include_subs: yes tvhd: - Killjoys: upgrade: yes exists_series: - /TV/{{series_name}} ``` #### Log: ``` 2017-08-01 11:03 VERBOSE task RARBG ACCEPTED: `Killjoys.S03E05.Attack.the.Rack.1080p.WEBRip.DDP5.1.x264-GRP` by series plugin because is an upgrade to existing quality 2017-08-01 11:03 VERBOSE task RARBG REJECTED: `Killjoys.S03E05.Attack.the.Rack.1080p.WEBRip.DDP5.1.x264-GRP` by exists_series plugin because proper already exists ``` ### Additional information: - FlexGet version: 2.10.74 - Python version: 2.7.9 - Installation method: pip - Using daemon (yes/no): yes - OS and version: Ubuntu 15.04 <!--- In config and debug/crash logs, remember to redact any personal or sensitive information such as passwords, API keys, private URLs and so on. Please verify that the following data is present before submitting your issue: - Link to a paste service or paste above the relevant config (preferably full config, including templates if present). Please make sure the paste does not expire, if possible. - Link to a paste service or paste above debug-level logs of the relevant task/s (use `flexget -L debug execute --tasks <Task_name>`). - FlexGet version (use `flexget -V` to get it). - Full Python version, for example `2.7.11` (use `python -V` to get it). Note that FlexGet is not supported for use with Python v3.0, 3.1, 3.2 or 3.6. - Installation method (pip, git install, etc). - Whether or not you're running FlexGet as a daemon. - OS and version. - Attach crash log if one was generated, in addition to the debug-level log. It can be found in the directory with your config file. ---> Inaccurate exists_series reject message The original issue was solved on forums: https://discuss.flexget.com/t/quality-upgrade-doesnt-working/3642 but I've been asked to add an issue here because of the _inaccurate error message_. ### Expected behaviour: Download better quality <!--- Please don't just say "it doesn't crash" or "it works". Explain what the expected result is. ---> ### Actual behaviour: Rejecting by exists_series plugin because proper already exists (but there's no proper release) #### Config: ``` tv: series: settings: tvhd: quality: 720p+ !h265 !10bit propers: 15 hours upgrade: no specials: no special_ids: e00 set: path: /TV/{{series_name}}/Season {{series_season}} include_subs: yes tvhd: - Killjoys: upgrade: yes exists_series: - /TV/{{series_name}} ``` #### Log: ``` 2017-08-01 11:03 VERBOSE task RARBG ACCEPTED: `Killjoys.S03E05.Attack.the.Rack.1080p.WEBRip.DDP5.1.x264-GRP` by series plugin because is an upgrade to existing quality 2017-08-01 11:03 VERBOSE task RARBG REJECTED: `Killjoys.S03E05.Attack.the.Rack.1080p.WEBRip.DDP5.1.x264-GRP` by exists_series plugin because proper already exists ``` ### Additional information: - FlexGet version: 2.10.74 - Python version: 2.7.9 - Installation method: pip - Using daemon (yes/no): yes - OS and version: Ubuntu 15.04 <!--- In config and debug/crash logs, remember to redact any personal or sensitive information such as passwords, API keys, private URLs and so on. Please verify that the following data is present before submitting your issue: - Link to a paste service or paste above the relevant config (preferably full config, including templates if present). Please make sure the paste does not expire, if possible. - Link to a paste service or paste above debug-level logs of the relevant task/s (use `flexget -L debug execute --tasks <Task_name>`). - FlexGet version (use `flexget -V` to get it). - Full Python version, for example `2.7.11` (use `python -V` to get it). Note that FlexGet is not supported for use with Python v3.0, 3.1, 3.2 or 3.6. - Installation method (pip, git install, etc). - Whether or not you're running FlexGet as a daemon. - OS and version. - Attach crash log if one was generated, in addition to the debug-level log. It can be found in the directory with your config file. ---> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `flexget/plugins/filter/exists_series.py` Content: ``` 1 from __future__ import unicode_literals, division, absolute_import 2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin 3 from past.builtins import basestring 4 5 import logging 6 7 from path import Path 8 9 from flexget import plugin 10 from flexget.event import event 11 from flexget.config_schema import one_or_more 12 from flexget.utils.log import log_once 13 from flexget.utils.template import RenderError 14 from flexget.plugins.parsers import ParseWarning 15 from flexget.plugin import get_plugin_by_name 16 17 log = logging.getLogger('exists_series') 18 19 20 class FilterExistsSeries(object): 21 """ 22 Intelligent series aware exists rejecting. 23 24 Example:: 25 26 exists_series: /storage/series/ 27 """ 28 29 schema = { 30 'anyOf': [ 31 one_or_more({'type': 'string', 'format': 'path'}), 32 { 33 'type': 'object', 34 'properties': { 35 'path': one_or_more({'type': 'string', 'format': 'path'}), 36 'allow_different_qualities': {'enum': ['better', True, False], 'default': False} 37 }, 38 'required': ['path'], 39 'additionalProperties': False 40 } 41 ] 42 } 43 44 def prepare_config(self, config): 45 # if config is not a dict, assign value to 'path' key 46 if not isinstance(config, dict): 47 config = {'path': config} 48 # if only a single path is passed turn it into a 1 element list 49 if isinstance(config['path'], basestring): 50 config['path'] = [config['path']] 51 return config 52 53 @plugin.priority(-1) 54 def on_task_filter(self, task, config): 55 if not task.accepted: 56 log.debug('Scanning not needed') 57 return 58 config = self.prepare_config(config) 59 accepted_series = {} 60 paths = set() 61 for entry in task.accepted: 62 if 'series_parser' in entry: 63 if entry['series_parser'].valid: 64 accepted_series.setdefault(entry['series_parser'].name, []).append(entry) 65 for folder in config['path']: 66 try: 67 paths.add(entry.render(folder)) 68 except RenderError as e: 69 log.error('Error rendering path `%s`: %s', folder, e) 70 else: 71 log.debug('entry %s series_parser invalid', entry['title']) 72 if not accepted_series: 73 log.warning('No accepted entries have series information. exists_series cannot filter them') 74 return 75 76 # scan through 77 # For speed, only test accepted entries since our priority should be after everything is accepted. 78 for series in accepted_series: 79 # make new parser from parser in entry 80 series_parser = accepted_series[series][0]['series_parser'] 81 for folder in paths: 82 folder = Path(folder).expanduser() 83 if not folder.isdir(): 84 log.warning('Directory %s does not exist', folder) 85 continue 86 87 for filename in folder.walk(errors='ignore'): 88 # run parser on filename data 89 try: 90 disk_parser = get_plugin_by_name('parsing').instance.parse_series(data=filename.name, 91 name=series_parser.name) 92 except ParseWarning as pw: 93 disk_parser = pw.parsed 94 log_once(pw.value, logger=log) 95 if disk_parser.valid: 96 log.debug('name %s is same series as %s', filename.name, series) 97 log.debug('disk_parser.identifier = %s', disk_parser.identifier) 98 log.debug('disk_parser.quality = %s', disk_parser.quality) 99 log.debug('disk_parser.proper_count = %s', disk_parser.proper_count) 100 101 for entry in accepted_series[series]: 102 log.debug('series_parser.identifier = %s', entry['series_parser'].identifier) 103 if disk_parser.identifier != entry['series_parser'].identifier: 104 log.trace('wrong identifier') 105 continue 106 log.debug('series_parser.quality = %s', entry['series_parser'].quality) 107 if config.get('allow_different_qualities') == 'better': 108 if entry['series_parser'].quality > disk_parser.quality: 109 log.trace('better quality') 110 continue 111 elif config.get('allow_different_qualities'): 112 if disk_parser.quality != entry['series_parser'].quality: 113 log.trace('wrong quality') 114 continue 115 log.debug('entry parser.proper_count = %s', entry['series_parser'].proper_count) 116 if disk_parser.proper_count >= entry['series_parser'].proper_count: 117 entry.reject('proper already exists') 118 continue 119 else: 120 log.trace('new one is better proper, allowing') 121 continue 122 123 124 @event('plugin.register') 125 def register_plugin(): 126 plugin.register(FilterExistsSeries, 'exists_series', interfaces=['task'], api_ver=2) 127 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/flexget/plugins/filter/exists_series.py b/flexget/plugins/filter/exists_series.py --- a/flexget/plugins/filter/exists_series.py +++ b/flexget/plugins/filter/exists_series.py @@ -114,7 +114,7 @@ continue log.debug('entry parser.proper_count = %s', entry['series_parser'].proper_count) if disk_parser.proper_count >= entry['series_parser'].proper_count: - entry.reject('proper already exists') + entry.reject('episode already exists') continue else: log.trace('new one is better proper, allowing')
{"golden_diff": "diff --git a/flexget/plugins/filter/exists_series.py b/flexget/plugins/filter/exists_series.py\n--- a/flexget/plugins/filter/exists_series.py\n+++ b/flexget/plugins/filter/exists_series.py\n@@ -114,7 +114,7 @@\n continue\n log.debug('entry parser.proper_count = %s', entry['series_parser'].proper_count)\n if disk_parser.proper_count >= entry['series_parser'].proper_count:\n- entry.reject('proper already exists')\n+ entry.reject('episode already exists')\n continue\n else:\n log.trace('new one is better proper, allowing')\n", "issue": "Inaccurate exists_series reject message\nThe original issue was solved on forums: https://discuss.flexget.com/t/quality-upgrade-doesnt-working/3642 but I've been asked to add an issue here because of the _inaccurate error message_.\r\n### Expected behaviour:\r\nDownload better quality\r\n<!---\r\nPlease don't just say \"it doesn't crash\" or \"it works\". Explain what the expected result is.\r\n--->\r\n\r\n### Actual behaviour:\r\nRejecting by exists_series plugin because proper already exists (but there's no proper release)\r\n \r\n#### Config:\r\n```\r\n tv:\r\n series:\r\n settings:\r\n tvhd:\r\n quality: 720p+ !h265 !10bit\r\n propers: 15 hours\r\n upgrade: no\r\n specials: no\r\n special_ids: e00\r\n set:\r\n path: /TV/{{series_name}}/Season {{series_season}}\r\n include_subs: yes\r\n tvhd: \r\n - Killjoys:\r\n upgrade: yes\r\n exists_series: \r\n - /TV/{{series_name}}\r\n```\r\n \r\n#### Log:\r\n```\r\n2017-08-01 11:03 VERBOSE task RARBG ACCEPTED: `Killjoys.S03E05.Attack.the.Rack.1080p.WEBRip.DDP5.1.x264-GRP` by series plugin because is an upgrade to existing quality\r\n2017-08-01 11:03 VERBOSE task RARBG REJECTED: `Killjoys.S03E05.Attack.the.Rack.1080p.WEBRip.DDP5.1.x264-GRP` by exists_series plugin because proper already exists\r\n\r\n```\r\n\r\n### Additional information:\r\n\r\n- FlexGet version: 2.10.74\r\n- Python version: 2.7.9\r\n- Installation method: pip\r\n- Using daemon (yes/no): yes\r\n- OS and version: Ubuntu 15.04\r\n\r\n\r\n<!---\r\nIn config and debug/crash logs, remember to redact any personal or sensitive information such as passwords, API keys, private URLs and so on.\r\n\r\nPlease verify that the following data is present before submitting your issue:\r\n\r\n- Link to a paste service or paste above the relevant config (preferably full config, including templates if present). Please make sure the paste does not expire, if possible.\r\n- Link to a paste service or paste above debug-level logs of the relevant task/s (use `flexget -L debug execute --tasks <Task_name>`).\r\n- FlexGet version (use `flexget -V` to get it).\r\n- Full Python version, for example `2.7.11` (use `python -V` to get it). Note that FlexGet is not supported for use with Python v3.0, 3.1, 3.2 or 3.6.\r\n- Installation method (pip, git install, etc).\r\n- Whether or not you're running FlexGet as a daemon.\r\n- OS and version.\r\n- Attach crash log if one was generated, in addition to the debug-level log. It can be found in the directory with your config file.\r\n--->\r\n\nInaccurate exists_series reject message\nThe original issue was solved on forums: https://discuss.flexget.com/t/quality-upgrade-doesnt-working/3642 but I've been asked to add an issue here because of the _inaccurate error message_.\r\n### Expected behaviour:\r\nDownload better quality\r\n<!---\r\nPlease don't just say \"it doesn't crash\" or \"it works\". Explain what the expected result is.\r\n--->\r\n\r\n### Actual behaviour:\r\nRejecting by exists_series plugin because proper already exists (but there's no proper release)\r\n \r\n#### Config:\r\n```\r\n tv:\r\n series:\r\n settings:\r\n tvhd:\r\n quality: 720p+ !h265 !10bit\r\n propers: 15 hours\r\n upgrade: no\r\n specials: no\r\n special_ids: e00\r\n set:\r\n path: /TV/{{series_name}}/Season {{series_season}}\r\n include_subs: yes\r\n tvhd: \r\n - Killjoys:\r\n upgrade: yes\r\n exists_series: \r\n - /TV/{{series_name}}\r\n```\r\n \r\n#### Log:\r\n```\r\n2017-08-01 11:03 VERBOSE task RARBG ACCEPTED: `Killjoys.S03E05.Attack.the.Rack.1080p.WEBRip.DDP5.1.x264-GRP` by series plugin because is an upgrade to existing quality\r\n2017-08-01 11:03 VERBOSE task RARBG REJECTED: `Killjoys.S03E05.Attack.the.Rack.1080p.WEBRip.DDP5.1.x264-GRP` by exists_series plugin because proper already exists\r\n\r\n```\r\n\r\n### Additional information:\r\n\r\n- FlexGet version: 2.10.74\r\n- Python version: 2.7.9\r\n- Installation method: pip\r\n- Using daemon (yes/no): yes\r\n- OS and version: Ubuntu 15.04\r\n\r\n\r\n<!---\r\nIn config and debug/crash logs, remember to redact any personal or sensitive information such as passwords, API keys, private URLs and so on.\r\n\r\nPlease verify that the following data is present before submitting your issue:\r\n\r\n- Link to a paste service or paste above the relevant config (preferably full config, including templates if present). Please make sure the paste does not expire, if possible.\r\n- Link to a paste service or paste above debug-level logs of the relevant task/s (use `flexget -L debug execute --tasks <Task_name>`).\r\n- FlexGet version (use `flexget -V` to get it).\r\n- Full Python version, for example `2.7.11` (use `python -V` to get it). Note that FlexGet is not supported for use with Python v3.0, 3.1, 3.2 or 3.6.\r\n- Installation method (pip, git install, etc).\r\n- Whether or not you're running FlexGet as a daemon.\r\n- OS and version.\r\n- Attach crash log if one was generated, in addition to the debug-level log. It can be found in the directory with your config file.\r\n--->\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\nfrom past.builtins import basestring\n\nimport logging\n\nfrom path import Path\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom flexget.config_schema import one_or_more\nfrom flexget.utils.log import log_once\nfrom flexget.utils.template import RenderError\nfrom flexget.plugins.parsers import ParseWarning\nfrom flexget.plugin import get_plugin_by_name\n\nlog = logging.getLogger('exists_series')\n\n\nclass FilterExistsSeries(object):\n \"\"\"\n Intelligent series aware exists rejecting.\n\n Example::\n\n exists_series: /storage/series/\n \"\"\"\n\n schema = {\n 'anyOf': [\n one_or_more({'type': 'string', 'format': 'path'}),\n {\n 'type': 'object',\n 'properties': {\n 'path': one_or_more({'type': 'string', 'format': 'path'}),\n 'allow_different_qualities': {'enum': ['better', True, False], 'default': False}\n },\n 'required': ['path'],\n 'additionalProperties': False\n }\n ]\n }\n\n def prepare_config(self, config):\n # if config is not a dict, assign value to 'path' key\n if not isinstance(config, dict):\n config = {'path': config}\n # if only a single path is passed turn it into a 1 element list\n if isinstance(config['path'], basestring):\n config['path'] = [config['path']]\n return config\n\n @plugin.priority(-1)\n def on_task_filter(self, task, config):\n if not task.accepted:\n log.debug('Scanning not needed')\n return\n config = self.prepare_config(config)\n accepted_series = {}\n paths = set()\n for entry in task.accepted:\n if 'series_parser' in entry:\n if entry['series_parser'].valid:\n accepted_series.setdefault(entry['series_parser'].name, []).append(entry)\n for folder in config['path']:\n try:\n paths.add(entry.render(folder))\n except RenderError as e:\n log.error('Error rendering path `%s`: %s', folder, e)\n else:\n log.debug('entry %s series_parser invalid', entry['title'])\n if not accepted_series:\n log.warning('No accepted entries have series information. exists_series cannot filter them')\n return\n\n # scan through\n # For speed, only test accepted entries since our priority should be after everything is accepted.\n for series in accepted_series:\n # make new parser from parser in entry\n series_parser = accepted_series[series][0]['series_parser']\n for folder in paths:\n folder = Path(folder).expanduser()\n if not folder.isdir():\n log.warning('Directory %s does not exist', folder)\n continue\n\n for filename in folder.walk(errors='ignore'):\n # run parser on filename data\n try:\n disk_parser = get_plugin_by_name('parsing').instance.parse_series(data=filename.name,\n name=series_parser.name)\n except ParseWarning as pw:\n disk_parser = pw.parsed\n log_once(pw.value, logger=log)\n if disk_parser.valid:\n log.debug('name %s is same series as %s', filename.name, series)\n log.debug('disk_parser.identifier = %s', disk_parser.identifier)\n log.debug('disk_parser.quality = %s', disk_parser.quality)\n log.debug('disk_parser.proper_count = %s', disk_parser.proper_count)\n\n for entry in accepted_series[series]:\n log.debug('series_parser.identifier = %s', entry['series_parser'].identifier)\n if disk_parser.identifier != entry['series_parser'].identifier:\n log.trace('wrong identifier')\n continue\n log.debug('series_parser.quality = %s', entry['series_parser'].quality)\n if config.get('allow_different_qualities') == 'better':\n if entry['series_parser'].quality > disk_parser.quality:\n log.trace('better quality')\n continue\n elif config.get('allow_different_qualities'):\n if disk_parser.quality != entry['series_parser'].quality:\n log.trace('wrong quality')\n continue\n log.debug('entry parser.proper_count = %s', entry['series_parser'].proper_count)\n if disk_parser.proper_count >= entry['series_parser'].proper_count:\n entry.reject('proper already exists')\n continue\n else:\n log.trace('new one is better proper, allowing')\n continue\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(FilterExistsSeries, 'exists_series', interfaces=['task'], api_ver=2)\n", "path": "flexget/plugins/filter/exists_series.py"}], "after_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\nfrom past.builtins import basestring\n\nimport logging\n\nfrom path import Path\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom flexget.config_schema import one_or_more\nfrom flexget.utils.log import log_once\nfrom flexget.utils.template import RenderError\nfrom flexget.plugins.parsers import ParseWarning\nfrom flexget.plugin import get_plugin_by_name\n\nlog = logging.getLogger('exists_series')\n\n\nclass FilterExistsSeries(object):\n \"\"\"\n Intelligent series aware exists rejecting.\n\n Example::\n\n exists_series: /storage/series/\n \"\"\"\n\n schema = {\n 'anyOf': [\n one_or_more({'type': 'string', 'format': 'path'}),\n {\n 'type': 'object',\n 'properties': {\n 'path': one_or_more({'type': 'string', 'format': 'path'}),\n 'allow_different_qualities': {'enum': ['better', True, False], 'default': False}\n },\n 'required': ['path'],\n 'additionalProperties': False\n }\n ]\n }\n\n def prepare_config(self, config):\n # if config is not a dict, assign value to 'path' key\n if not isinstance(config, dict):\n config = {'path': config}\n # if only a single path is passed turn it into a 1 element list\n if isinstance(config['path'], basestring):\n config['path'] = [config['path']]\n return config\n\n @plugin.priority(-1)\n def on_task_filter(self, task, config):\n if not task.accepted:\n log.debug('Scanning not needed')\n return\n config = self.prepare_config(config)\n accepted_series = {}\n paths = set()\n for entry in task.accepted:\n if 'series_parser' in entry:\n if entry['series_parser'].valid:\n accepted_series.setdefault(entry['series_parser'].name, []).append(entry)\n for folder in config['path']:\n try:\n paths.add(entry.render(folder))\n except RenderError as e:\n log.error('Error rendering path `%s`: %s', folder, e)\n else:\n log.debug('entry %s series_parser invalid', entry['title'])\n if not accepted_series:\n log.warning('No accepted entries have series information. exists_series cannot filter them')\n return\n\n # scan through\n # For speed, only test accepted entries since our priority should be after everything is accepted.\n for series in accepted_series:\n # make new parser from parser in entry\n series_parser = accepted_series[series][0]['series_parser']\n for folder in paths:\n folder = Path(folder).expanduser()\n if not folder.isdir():\n log.warning('Directory %s does not exist', folder)\n continue\n\n for filename in folder.walk(errors='ignore'):\n # run parser on filename data\n try:\n disk_parser = get_plugin_by_name('parsing').instance.parse_series(data=filename.name,\n name=series_parser.name)\n except ParseWarning as pw:\n disk_parser = pw.parsed\n log_once(pw.value, logger=log)\n if disk_parser.valid:\n log.debug('name %s is same series as %s', filename.name, series)\n log.debug('disk_parser.identifier = %s', disk_parser.identifier)\n log.debug('disk_parser.quality = %s', disk_parser.quality)\n log.debug('disk_parser.proper_count = %s', disk_parser.proper_count)\n\n for entry in accepted_series[series]:\n log.debug('series_parser.identifier = %s', entry['series_parser'].identifier)\n if disk_parser.identifier != entry['series_parser'].identifier:\n log.trace('wrong identifier')\n continue\n log.debug('series_parser.quality = %s', entry['series_parser'].quality)\n if config.get('allow_different_qualities') == 'better':\n if entry['series_parser'].quality > disk_parser.quality:\n log.trace('better quality')\n continue\n elif config.get('allow_different_qualities'):\n if disk_parser.quality != entry['series_parser'].quality:\n log.trace('wrong quality')\n continue\n log.debug('entry parser.proper_count = %s', entry['series_parser'].proper_count)\n if disk_parser.proper_count >= entry['series_parser'].proper_count:\n entry.reject('episode already exists')\n continue\n else:\n log.trace('new one is better proper, allowing')\n continue\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(FilterExistsSeries, 'exists_series', interfaces=['task'], api_ver=2)\n", "path": "flexget/plugins/filter/exists_series.py"}]}
2,940
138
gh_patches_debug_24036
rasdani/github-patches
git_diff
DistrictDataLabs__yellowbrick-371
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Error from KElbowVisualizer's distortion_score method `KElbowVisualizer` is generating a `ValueError` from Sklearn. ### Issue Using the Yellowbrick `hobbies` corpus and vectorizing with TFIDF, the KElbowVisualizer is generating an error from Sklearn: `ValueError: Found array with dim 3. check_pairwise_arrays expected <= 2`. The `distortion_score` method appears to be causing the error when calling Sklearn's `pairwise_distances` method. ### Code ``` corpus = load_corpus('hobbies') tfidf = TfidfVectorizer() docs = tfidf.fit_transform(corpus) visualizer = KElbowVisualizer(KMeans(), k=(4, 8)) visualizer.fit(docs) visualizer.poof() ``` ### Error ``` Traceback (most recent call last): File "elbows.py", line 82, in <module> visualizer.fit(docs) File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/yellowbrick/cluster/elbow.py", line 245, in fit self.scoring_metric(X, self.estimator.labels_) File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/yellowbrick/cluster/elbow.py", line 87, in distortion_score distances = pairwise_distances(instances, [center], metric=metric) File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/sklearn/metrics/pairwise.py", line 1247, in pairwise_distances return _parallel_pairwise(X, Y, func, n_jobs, **kwds) File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/sklearn/metrics/pairwise.py", line 1090, in _parallel_pairwise return func(X, Y, **kwds) File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/sklearn/metrics/pairwise.py", line 223, in euclidean_distances X, Y = check_pairwise_arrays(X, Y) File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/sklearn/metrics/pairwise.py", line 112, in check_pairwise_arrays warn_on_dtype=warn_on_dtype, estimator=estimator) File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/sklearn/utils/validation.py", line 451, in check_array % (array.ndim, estimator_name)) ValueError: Found array with dim 3. check_pairwise_arrays expected <= 2. ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `yellowbrick/cluster/elbow.py` Content: ``` 1 # yellowbrick.cluster.elbow 2 # Implements the elbow method for determining the optimal number of clusters. 3 # 4 # Author: Benjamin Bengfort <[email protected]> 5 # Created: Thu Mar 23 22:36:31 2017 -0400 6 # 7 # Copyright (C) 2016 District Data Labs 8 # For license information, see LICENSE.txt 9 # 10 # ID: elbow.py [5a370c8] [email protected] $ 11 12 """ 13 Implements the elbow method for determining the optimal number of clusters. 14 https://bl.ocks.org/rpgove/0060ff3b656618e9136b 15 """ 16 17 ########################################################################## 18 ## Imports 19 ########################################################################## 20 21 import time 22 23 from .base import ClusteringScoreVisualizer 24 from ..exceptions import YellowbrickValueError 25 26 from sklearn.metrics import silhouette_score 27 from sklearn.metrics import calinski_harabaz_score 28 from sklearn.metrics.pairwise import pairwise_distances 29 from sklearn.preprocessing import LabelEncoder 30 31 32 ## Packages for export 33 __all__ = [ 34 "KElbowVisualizer", "distortion_score" 35 ] 36 37 38 ########################################################################## 39 ## Metrics 40 ########################################################################## 41 42 def distortion_score(X, labels, metric='euclidean'): 43 """ 44 Compute the mean distortion of all samples. 45 46 The distortion is computed as the the sum of the squared distances between 47 each observation and its closest centroid. Logically, this is the metric 48 that K-Means attempts to minimize as it is fitting the model. 49 50 .. seealso:: http://kldavenport.com/the-cost-function-of-k-means/ 51 52 Parameters 53 ---------- 54 X : array, shape = [n_samples, n_features] or [n_samples_a, n_samples_a] 55 Array of pairwise distances between samples if metric == "precomputed" 56 or a feature array for computing distances against the labels. 57 58 labels : array, shape = [n_samples] 59 Predicted labels for each sample 60 61 metric : string 62 The metric to use when calculating distance between instances in a 63 feature array. If metric is a string, it must be one of the options 64 allowed by `sklearn.metrics.pairwise.pairwise_distances 65 <http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.pairwise_distances.html#sklearn.metrics.pairwise.pairwise_distances>`_ 66 67 .. todo:: add sample_size and random_state kwds similar to silhouette_score 68 """ 69 # Encode labels to get unique centers and groups 70 le = LabelEncoder() 71 labels = le.fit_transform(labels) 72 unique_labels = le.classes_ 73 74 # Sum of the distortions 75 distortion = 0 76 77 # Loop through each label (center) to compute the centroid 78 for current_label in unique_labels: 79 # Mask the instances that belong to the current label 80 mask = labels == current_label 81 instances = X[mask] 82 83 # Compute the center of these instances 84 center = instances.mean(axis=0) 85 86 # Compute the square distances from the instances to the center 87 distances = pairwise_distances(instances, [center], metric=metric) 88 distances = distances ** 2 89 90 # Add the mean square distance to the distortion 91 distortion += distances.mean() 92 93 return distortion 94 95 96 ########################################################################## 97 ## Elbow Method 98 ########################################################################## 99 100 KELBOW_SCOREMAP = { 101 "distortion": distortion_score, 102 "silhouette": silhouette_score, 103 "calinski_harabaz": calinski_harabaz_score, 104 } 105 106 107 class KElbowVisualizer(ClusteringScoreVisualizer): 108 """ 109 The K-Elbow Visualizer implements the "elbow" method of selecting the 110 optimal number of clusters for K-means clustering. K-means is a simple 111 unsupervised machine learning algorithm that groups data into a specified 112 number (k) of clusters. Because the user must specify in advance what k to 113 choose, the algorithm is somewhat naive -- it assigns all members to k 114 clusters even if that is not the right k for the dataset. 115 116 The elbow method runs k-means clustering on the dataset for a range of 117 values for k (say from 1-10) and then for each value of k computes an 118 average score for all clusters. By default, the ``distortion_score`` is 119 computed, the sum of square distances from each point to its assigned 120 center. Other metrics can also be used such as the ``silhouette_score``, 121 the mean silhouette coefficient for all samples or the 122 ``calinski_harabaz_score``, which computes the ratio of dispersion between 123 and within clusters. 124 125 When these overall metrics for each model are plotted, it is possible to 126 visually determine the best value for K. If the line chart looks like an 127 arm, then the "elbow" (the point of inflection on the curve) is the best 128 value of k. The "arm" can be either up or down, but if there is a strong 129 inflection point, it is a good indication that the underlying model fits 130 best at that point. 131 132 Parameters 133 ---------- 134 135 model : a Scikit-Learn clusterer 136 Should be an instance of a clusterer, specifically ``KMeans`` or 137 ``MiniBatchKMeans``. If it is not a clusterer, an exception is raised. 138 139 ax : matplotlib Axes, default: None 140 The axes to plot the figure on. If None is passed in the current axes 141 will be used (or generated if required). 142 143 k : integer or tuple 144 The range of k to compute silhouette scores for. If a single integer 145 is specified, then will compute the range (2,k) otherwise the 146 specified range in the tuple is used. 147 148 metric : string, default: ``"distortion"`` 149 Select the scoring metric to evaluate the clusters. The default is the 150 mean distortion, defined by the sum of squared distances between each 151 observation and its closest centroid. Other metrics include: 152 153 - **distortion**: mean sum of squared distances to centers 154 - **silhouette**: mean ratio of intra-cluster and nearest-cluster distance 155 - **calinski_harabaz**: ratio of within to between cluster dispersion 156 157 timings : bool, default: True 158 Display the fitting time per k to evaluate the amount of time required 159 to train the clustering model. 160 161 kwargs : dict 162 Keyword arguments that are passed to the base class and may influence 163 the visualization as defined in other Visualizers. 164 165 Examples 166 -------- 167 168 >>> from yellowbrick.cluster import KElbowVisualizer 169 >>> from sklearn.cluster import KMeans 170 >>> model = KElbowVisualizer(KMeans(), k=10) 171 >>> model.fit(X) 172 >>> model.poof() 173 174 Notes 175 ----- 176 177 If you get a visualizer that doesn't have an elbow or inflection point, 178 then this method may not be working. The elbow method does not work well 179 if the data is not very clustered; in this case you might see a smooth 180 curve and the value of k is unclear. Other scoring methods such as BIC or 181 SSE also can be used to explore if clustering is a correct choice. 182 183 For a discussion on the Elbow method, read more at 184 `Robert Gove's Block <https://bl.ocks.org/rpgove/0060ff3b656618e9136b>`_. 185 186 .. todo:: add parallelization option for performance 187 .. todo:: add different metrics for scores and silhoutte 188 .. todo:: add timing information about how long its taking 189 """ 190 191 def __init__(self, model, ax=None, k=10, 192 metric="distortion", timings=True, **kwargs): 193 super(KElbowVisualizer, self).__init__(model, ax=ax, **kwargs) 194 195 # Get the scoring method 196 if metric not in KELBOW_SCOREMAP: 197 raise YellowbrickValueError( 198 "'{}' is not a defined metric " 199 "use one of distortion, silhouette, or calinski_harabaz" 200 ) 201 202 # Store the arguments 203 self.scoring_metric = KELBOW_SCOREMAP[metric] 204 self.timings = timings 205 206 # Convert K into a tuple argument if an integer 207 if isinstance(k, int): 208 k = (2, k+1) 209 210 # Expand k in to the values we will use, capturing exceptions 211 try: 212 k = tuple(k) 213 self.k_values_ = list(range(*k)) 214 except: 215 raise YellowbrickValueError(( 216 "Specify a range or maximal K value, the value '{}' " 217 "is not a valid argument for K.".format(k) 218 )) 219 220 221 # Holds the values of the silhoutte scores 222 self.k_scores_ = None 223 224 def fit(self, X, y=None, **kwargs): 225 """ 226 Fits n KMeans models where n is the length of ``self.k_values_``, 227 storing the silhoutte scores in the ``self.k_scores_`` attribute. 228 This method finishes up by calling draw to create the plot. 229 """ 230 231 self.k_scores_ = [] 232 self.k_timers_ = [] 233 234 for k in self.k_values_: 235 # Compute the start time for each model 236 start = time.time() 237 238 # Set the k value and fit the model 239 self.estimator.set_params(n_clusters=k) 240 self.estimator.fit(X) 241 242 # Append the time and score to our plottable metrics 243 self.k_timers_.append(time.time() - start) 244 self.k_scores_.append( 245 self.scoring_metric(X, self.estimator.labels_) 246 ) 247 248 self.draw() 249 250 return self 251 252 def draw(self): 253 """ 254 Draw the elbow curve for the specified scores and values of K. 255 """ 256 # Plot the silhouette score against k 257 self.ax.plot(self.k_values_, self.k_scores_, marker="D", label="score") 258 259 # If we're going to plot the timings, create a twinx axis 260 if self.timings: 261 self.axes = [self.ax, self.ax.twinx()] 262 self.axes[1].plot( 263 self.k_values_, self.k_timers_, label="fit time", 264 c='g', marker="o", linestyle="--", alpha=0.75, 265 ) 266 267 return self.ax 268 269 def finalize(self): 270 """ 271 Prepare the figure for rendering by setting the title as well as the 272 X and Y axis labels and adding the legend. 273 """ 274 # Get the metric name 275 metric = self.scoring_metric.__name__.replace("_", " ").title() 276 277 # Set the title 278 self.set_title( 279 '{} Elbow for {} Clustering'.format(metric, self.name) 280 ) 281 282 # Set the x and y labels 283 self.ax.set_xlabel('k') 284 self.ax.set_ylabel(metric.lower()) 285 286 # Set the second y axis labels 287 if self.timings: 288 self.axes[1].grid(False) 289 self.axes[1].set_ylabel("fit time (seconds)", color='g') 290 self.axes[1].tick_params('y', colors='g') 291 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/yellowbrick/cluster/elbow.py b/yellowbrick/cluster/elbow.py --- a/yellowbrick/cluster/elbow.py +++ b/yellowbrick/cluster/elbow.py @@ -19,6 +19,8 @@ ########################################################################## import time +import numpy as np +import scipy.sparse as sp from .base import ClusteringScoreVisualizer from ..exceptions import YellowbrickValueError @@ -83,8 +85,16 @@ # Compute the center of these instances center = instances.mean(axis=0) + # NOTE: csc_matrix and csr_matrix mean returns a 2D array, numpy.mean + # returns an array of 1 dimension less than the input. We expect + # instances to be a 2D array, therefore to do pairwise computation we + # require center to be a 2D array with a single row (the center). + # See #370 for more detail. + if not sp.issparse(instances): + center = np.array([center]) + # Compute the square distances from the instances to the center - distances = pairwise_distances(instances, [center], metric=metric) + distances = pairwise_distances(instances, center, metric=metric) distances = distances ** 2 # Add the mean square distance to the distortion
{"golden_diff": "diff --git a/yellowbrick/cluster/elbow.py b/yellowbrick/cluster/elbow.py\n--- a/yellowbrick/cluster/elbow.py\n+++ b/yellowbrick/cluster/elbow.py\n@@ -19,6 +19,8 @@\n ##########################################################################\n \n import time\n+import numpy as np\n+import scipy.sparse as sp\n \n from .base import ClusteringScoreVisualizer\n from ..exceptions import YellowbrickValueError\n@@ -83,8 +85,16 @@\n # Compute the center of these instances\n center = instances.mean(axis=0)\n \n+ # NOTE: csc_matrix and csr_matrix mean returns a 2D array, numpy.mean\n+ # returns an array of 1 dimension less than the input. We expect\n+ # instances to be a 2D array, therefore to do pairwise computation we\n+ # require center to be a 2D array with a single row (the center).\n+ # See #370 for more detail.\n+ if not sp.issparse(instances):\n+ center = np.array([center])\n+\n # Compute the square distances from the instances to the center\n- distances = pairwise_distances(instances, [center], metric=metric)\n+ distances = pairwise_distances(instances, center, metric=metric)\n distances = distances ** 2\n \n # Add the mean square distance to the distortion\n", "issue": "Error from KElbowVisualizer's distortion_score method\n`KElbowVisualizer` is generating a `ValueError` from Sklearn.\r\n\r\n### Issue\r\n\r\nUsing the Yellowbrick `hobbies` corpus and vectorizing with TFIDF, the KElbowVisualizer is generating an error from Sklearn: `ValueError: Found array with dim 3. check_pairwise_arrays expected <= 2`. The `distortion_score` method appears to be causing the error when calling Sklearn's `pairwise_distances` method.\r\n\r\n### Code\r\n```\r\ncorpus = load_corpus('hobbies')\r\ntfidf = TfidfVectorizer()\r\ndocs = tfidf.fit_transform(corpus)\r\nvisualizer = KElbowVisualizer(KMeans(), k=(4, 8))\r\nvisualizer.fit(docs)\r\nvisualizer.poof()\r\n```\r\n\r\n### Error\r\n```\r\nTraceback (most recent call last):\r\n File \"elbows.py\", line 82, in <module>\r\n visualizer.fit(docs)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/yellowbrick/cluster/elbow.py\", line 245, in fit\r\n self.scoring_metric(X, self.estimator.labels_)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/yellowbrick/cluster/elbow.py\", line 87, in distortion_score\r\n distances = pairwise_distances(instances, [center], metric=metric)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/sklearn/metrics/pairwise.py\", line 1247, in pairwise_distances\r\n return _parallel_pairwise(X, Y, func, n_jobs, **kwds)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/sklearn/metrics/pairwise.py\", line 1090, in _parallel_pairwise\r\n return func(X, Y, **kwds)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/sklearn/metrics/pairwise.py\", line 223, in euclidean_distances\r\n X, Y = check_pairwise_arrays(X, Y)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/sklearn/metrics/pairwise.py\", line 112, in check_pairwise_arrays\r\n warn_on_dtype=warn_on_dtype, estimator=estimator)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/sklearn/utils/validation.py\", line 451, in check_array\r\n % (array.ndim, estimator_name))\r\nValueError: Found array with dim 3. check_pairwise_arrays expected <= 2.\r\n```\n", "before_files": [{"content": "# yellowbrick.cluster.elbow\n# Implements the elbow method for determining the optimal number of clusters.\n#\n# Author: Benjamin Bengfort <[email protected]>\n# Created: Thu Mar 23 22:36:31 2017 -0400\n#\n# Copyright (C) 2016 District Data Labs\n# For license information, see LICENSE.txt\n#\n# ID: elbow.py [5a370c8] [email protected] $\n\n\"\"\"\nImplements the elbow method for determining the optimal number of clusters.\nhttps://bl.ocks.org/rpgove/0060ff3b656618e9136b\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport time\n\nfrom .base import ClusteringScoreVisualizer\nfrom ..exceptions import YellowbrickValueError\n\nfrom sklearn.metrics import silhouette_score\nfrom sklearn.metrics import calinski_harabaz_score\nfrom sklearn.metrics.pairwise import pairwise_distances\nfrom sklearn.preprocessing import LabelEncoder\n\n\n## Packages for export\n__all__ = [\n \"KElbowVisualizer\", \"distortion_score\"\n]\n\n\n##########################################################################\n## Metrics\n##########################################################################\n\ndef distortion_score(X, labels, metric='euclidean'):\n \"\"\"\n Compute the mean distortion of all samples.\n\n The distortion is computed as the the sum of the squared distances between\n each observation and its closest centroid. Logically, this is the metric\n that K-Means attempts to minimize as it is fitting the model.\n\n .. seealso:: http://kldavenport.com/the-cost-function-of-k-means/\n\n Parameters\n ----------\n X : array, shape = [n_samples, n_features] or [n_samples_a, n_samples_a]\n Array of pairwise distances between samples if metric == \"precomputed\"\n or a feature array for computing distances against the labels.\n\n labels : array, shape = [n_samples]\n Predicted labels for each sample\n\n metric : string\n The metric to use when calculating distance between instances in a\n feature array. If metric is a string, it must be one of the options\n allowed by `sklearn.metrics.pairwise.pairwise_distances\n <http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.pairwise_distances.html#sklearn.metrics.pairwise.pairwise_distances>`_\n\n .. todo:: add sample_size and random_state kwds similar to silhouette_score\n \"\"\"\n # Encode labels to get unique centers and groups\n le = LabelEncoder()\n labels = le.fit_transform(labels)\n unique_labels = le.classes_\n\n # Sum of the distortions\n distortion = 0\n\n # Loop through each label (center) to compute the centroid\n for current_label in unique_labels:\n # Mask the instances that belong to the current label\n mask = labels == current_label\n instances = X[mask]\n\n # Compute the center of these instances\n center = instances.mean(axis=0)\n\n # Compute the square distances from the instances to the center\n distances = pairwise_distances(instances, [center], metric=metric)\n distances = distances ** 2\n\n # Add the mean square distance to the distortion\n distortion += distances.mean()\n\n return distortion\n\n\n##########################################################################\n## Elbow Method\n##########################################################################\n\nKELBOW_SCOREMAP = {\n \"distortion\": distortion_score,\n \"silhouette\": silhouette_score,\n \"calinski_harabaz\": calinski_harabaz_score,\n}\n\n\nclass KElbowVisualizer(ClusteringScoreVisualizer):\n \"\"\"\n The K-Elbow Visualizer implements the \"elbow\" method of selecting the\n optimal number of clusters for K-means clustering. K-means is a simple\n unsupervised machine learning algorithm that groups data into a specified\n number (k) of clusters. Because the user must specify in advance what k to\n choose, the algorithm is somewhat naive -- it assigns all members to k\n clusters even if that is not the right k for the dataset.\n\n The elbow method runs k-means clustering on the dataset for a range of\n values for k (say from 1-10) and then for each value of k computes an\n average score for all clusters. By default, the ``distortion_score`` is\n computed, the sum of square distances from each point to its assigned\n center. Other metrics can also be used such as the ``silhouette_score``,\n the mean silhouette coefficient for all samples or the\n ``calinski_harabaz_score``, which computes the ratio of dispersion between\n and within clusters.\n\n When these overall metrics for each model are plotted, it is possible to\n visually determine the best value for K. If the line chart looks like an\n arm, then the \"elbow\" (the point of inflection on the curve) is the best\n value of k. The \"arm\" can be either up or down, but if there is a strong\n inflection point, it is a good indication that the underlying model fits\n best at that point.\n\n Parameters\n ----------\n\n model : a Scikit-Learn clusterer\n Should be an instance of a clusterer, specifically ``KMeans`` or\n ``MiniBatchKMeans``. If it is not a clusterer, an exception is raised.\n\n ax : matplotlib Axes, default: None\n The axes to plot the figure on. If None is passed in the current axes\n will be used (or generated if required).\n\n k : integer or tuple\n The range of k to compute silhouette scores for. If a single integer\n is specified, then will compute the range (2,k) otherwise the\n specified range in the tuple is used.\n\n metric : string, default: ``\"distortion\"``\n Select the scoring metric to evaluate the clusters. The default is the\n mean distortion, defined by the sum of squared distances between each\n observation and its closest centroid. Other metrics include:\n\n - **distortion**: mean sum of squared distances to centers\n - **silhouette**: mean ratio of intra-cluster and nearest-cluster distance\n - **calinski_harabaz**: ratio of within to between cluster dispersion\n\n timings : bool, default: True\n Display the fitting time per k to evaluate the amount of time required\n to train the clustering model.\n\n kwargs : dict\n Keyword arguments that are passed to the base class and may influence\n the visualization as defined in other Visualizers.\n\n Examples\n --------\n\n >>> from yellowbrick.cluster import KElbowVisualizer\n >>> from sklearn.cluster import KMeans\n >>> model = KElbowVisualizer(KMeans(), k=10)\n >>> model.fit(X)\n >>> model.poof()\n\n Notes\n -----\n\n If you get a visualizer that doesn't have an elbow or inflection point,\n then this method may not be working. The elbow method does not work well\n if the data is not very clustered; in this case you might see a smooth\n curve and the value of k is unclear. Other scoring methods such as BIC or\n SSE also can be used to explore if clustering is a correct choice.\n\n For a discussion on the Elbow method, read more at\n `Robert Gove's Block <https://bl.ocks.org/rpgove/0060ff3b656618e9136b>`_.\n\n .. todo:: add parallelization option for performance\n .. todo:: add different metrics for scores and silhoutte\n .. todo:: add timing information about how long its taking\n \"\"\"\n\n def __init__(self, model, ax=None, k=10,\n metric=\"distortion\", timings=True, **kwargs):\n super(KElbowVisualizer, self).__init__(model, ax=ax, **kwargs)\n\n # Get the scoring method\n if metric not in KELBOW_SCOREMAP:\n raise YellowbrickValueError(\n \"'{}' is not a defined metric \"\n \"use one of distortion, silhouette, or calinski_harabaz\"\n )\n\n # Store the arguments\n self.scoring_metric = KELBOW_SCOREMAP[metric]\n self.timings = timings\n\n # Convert K into a tuple argument if an integer\n if isinstance(k, int):\n k = (2, k+1)\n\n # Expand k in to the values we will use, capturing exceptions\n try:\n k = tuple(k)\n self.k_values_ = list(range(*k))\n except:\n raise YellowbrickValueError((\n \"Specify a range or maximal K value, the value '{}' \"\n \"is not a valid argument for K.\".format(k)\n ))\n\n\n # Holds the values of the silhoutte scores\n self.k_scores_ = None\n\n def fit(self, X, y=None, **kwargs):\n \"\"\"\n Fits n KMeans models where n is the length of ``self.k_values_``,\n storing the silhoutte scores in the ``self.k_scores_`` attribute.\n This method finishes up by calling draw to create the plot.\n \"\"\"\n\n self.k_scores_ = []\n self.k_timers_ = []\n\n for k in self.k_values_:\n # Compute the start time for each model\n start = time.time()\n\n # Set the k value and fit the model\n self.estimator.set_params(n_clusters=k)\n self.estimator.fit(X)\n\n # Append the time and score to our plottable metrics\n self.k_timers_.append(time.time() - start)\n self.k_scores_.append(\n self.scoring_metric(X, self.estimator.labels_)\n )\n\n self.draw()\n\n return self\n\n def draw(self):\n \"\"\"\n Draw the elbow curve for the specified scores and values of K.\n \"\"\"\n # Plot the silhouette score against k\n self.ax.plot(self.k_values_, self.k_scores_, marker=\"D\", label=\"score\")\n\n # If we're going to plot the timings, create a twinx axis\n if self.timings:\n self.axes = [self.ax, self.ax.twinx()]\n self.axes[1].plot(\n self.k_values_, self.k_timers_, label=\"fit time\",\n c='g', marker=\"o\", linestyle=\"--\", alpha=0.75,\n )\n\n return self.ax\n\n def finalize(self):\n \"\"\"\n Prepare the figure for rendering by setting the title as well as the\n X and Y axis labels and adding the legend.\n \"\"\"\n # Get the metric name\n metric = self.scoring_metric.__name__.replace(\"_\", \" \").title()\n\n # Set the title\n self.set_title(\n '{} Elbow for {} Clustering'.format(metric, self.name)\n )\n\n # Set the x and y labels\n self.ax.set_xlabel('k')\n self.ax.set_ylabel(metric.lower())\n\n # Set the second y axis labels\n if self.timings:\n self.axes[1].grid(False)\n self.axes[1].set_ylabel(\"fit time (seconds)\", color='g')\n self.axes[1].tick_params('y', colors='g')\n", "path": "yellowbrick/cluster/elbow.py"}], "after_files": [{"content": "# yellowbrick.cluster.elbow\n# Implements the elbow method for determining the optimal number of clusters.\n#\n# Author: Benjamin Bengfort <[email protected]>\n# Created: Thu Mar 23 22:36:31 2017 -0400\n#\n# Copyright (C) 2016 District Data Labs\n# For license information, see LICENSE.txt\n#\n# ID: elbow.py [5a370c8] [email protected] $\n\n\"\"\"\nImplements the elbow method for determining the optimal number of clusters.\nhttps://bl.ocks.org/rpgove/0060ff3b656618e9136b\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport time\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom .base import ClusteringScoreVisualizer\nfrom ..exceptions import YellowbrickValueError\n\nfrom sklearn.metrics import silhouette_score\nfrom sklearn.metrics import calinski_harabaz_score\nfrom sklearn.metrics.pairwise import pairwise_distances\nfrom sklearn.preprocessing import LabelEncoder\n\n\n## Packages for export\n__all__ = [\n \"KElbowVisualizer\", \"distortion_score\"\n]\n\n\n##########################################################################\n## Metrics\n##########################################################################\n\ndef distortion_score(X, labels, metric='euclidean'):\n \"\"\"\n Compute the mean distortion of all samples.\n\n The distortion is computed as the the sum of the squared distances between\n each observation and its closest centroid. Logically, this is the metric\n that K-Means attempts to minimize as it is fitting the model.\n\n .. seealso:: http://kldavenport.com/the-cost-function-of-k-means/\n\n Parameters\n ----------\n X : array, shape = [n_samples, n_features] or [n_samples_a, n_samples_a]\n Array of pairwise distances between samples if metric == \"precomputed\"\n or a feature array for computing distances against the labels.\n\n labels : array, shape = [n_samples]\n Predicted labels for each sample\n\n metric : string\n The metric to use when calculating distance between instances in a\n feature array. If metric is a string, it must be one of the options\n allowed by `sklearn.metrics.pairwise.pairwise_distances\n <http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.pairwise_distances.html#sklearn.metrics.pairwise.pairwise_distances>`_\n\n .. todo:: add sample_size and random_state kwds similar to silhouette_score\n \"\"\"\n # Encode labels to get unique centers and groups\n le = LabelEncoder()\n labels = le.fit_transform(labels)\n unique_labels = le.classes_\n\n # Sum of the distortions\n distortion = 0\n\n # Loop through each label (center) to compute the centroid\n for current_label in unique_labels:\n # Mask the instances that belong to the current label\n mask = labels == current_label\n instances = X[mask]\n\n # Compute the center of these instances\n center = instances.mean(axis=0)\n\n # NOTE: csc_matrix and csr_matrix mean returns a 2D array, numpy.mean\n # returns an array of 1 dimension less than the input. We expect\n # instances to be a 2D array, therefore to do pairwise computation we\n # require center to be a 2D array with a single row (the center).\n # See #370 for more detail.\n if not sp.issparse(instances):\n center = np.array([center])\n\n # Compute the square distances from the instances to the center\n distances = pairwise_distances(instances, center, metric=metric)\n distances = distances ** 2\n\n # Add the mean square distance to the distortion\n distortion += distances.mean()\n\n return distortion\n\n\n##########################################################################\n## Elbow Method\n##########################################################################\n\nKELBOW_SCOREMAP = {\n \"distortion\": distortion_score,\n \"silhouette\": silhouette_score,\n \"calinski_harabaz\": calinski_harabaz_score,\n}\n\n\nclass KElbowVisualizer(ClusteringScoreVisualizer):\n \"\"\"\n The K-Elbow Visualizer implements the \"elbow\" method of selecting the\n optimal number of clusters for K-means clustering. K-means is a simple\n unsupervised machine learning algorithm that groups data into a specified\n number (k) of clusters. Because the user must specify in advance what k to\n choose, the algorithm is somewhat naive -- it assigns all members to k\n clusters even if that is not the right k for the dataset.\n\n The elbow method runs k-means clustering on the dataset for a range of\n values for k (say from 1-10) and then for each value of k computes an\n average score for all clusters. By default, the ``distortion_score`` is\n computed, the sum of square distances from each point to its assigned\n center. Other metrics can also be used such as the ``silhouette_score``,\n the mean silhouette coefficient for all samples or the\n ``calinski_harabaz_score``, which computes the ratio of dispersion between\n and within clusters.\n\n When these overall metrics for each model are plotted, it is possible to\n visually determine the best value for K. If the line chart looks like an\n arm, then the \"elbow\" (the point of inflection on the curve) is the best\n value of k. The \"arm\" can be either up or down, but if there is a strong\n inflection point, it is a good indication that the underlying model fits\n best at that point.\n\n Parameters\n ----------\n\n model : a Scikit-Learn clusterer\n Should be an instance of a clusterer, specifically ``KMeans`` or\n ``MiniBatchKMeans``. If it is not a clusterer, an exception is raised.\n\n ax : matplotlib Axes, default: None\n The axes to plot the figure on. If None is passed in the current axes\n will be used (or generated if required).\n\n k : integer or tuple\n The range of k to compute silhouette scores for. If a single integer\n is specified, then will compute the range (2,k) otherwise the\n specified range in the tuple is used.\n\n metric : string, default: ``\"distortion\"``\n Select the scoring metric to evaluate the clusters. The default is the\n mean distortion, defined by the sum of squared distances between each\n observation and its closest centroid. Other metrics include:\n\n - **distortion**: mean sum of squared distances to centers\n - **silhouette**: mean ratio of intra-cluster and nearest-cluster distance\n - **calinski_harabaz**: ratio of within to between cluster dispersion\n\n timings : bool, default: True\n Display the fitting time per k to evaluate the amount of time required\n to train the clustering model.\n\n kwargs : dict\n Keyword arguments that are passed to the base class and may influence\n the visualization as defined in other Visualizers.\n\n Examples\n --------\n\n >>> from yellowbrick.cluster import KElbowVisualizer\n >>> from sklearn.cluster import KMeans\n >>> model = KElbowVisualizer(KMeans(), k=10)\n >>> model.fit(X)\n >>> model.poof()\n\n Notes\n -----\n\n If you get a visualizer that doesn't have an elbow or inflection point,\n then this method may not be working. The elbow method does not work well\n if the data is not very clustered; in this case you might see a smooth\n curve and the value of k is unclear. Other scoring methods such as BIC or\n SSE also can be used to explore if clustering is a correct choice.\n\n For a discussion on the Elbow method, read more at\n `Robert Gove's Block <https://bl.ocks.org/rpgove/0060ff3b656618e9136b>`_.\n\n .. todo:: add parallelization option for performance\n .. todo:: add different metrics for scores and silhoutte\n .. todo:: add timing information about how long its taking\n \"\"\"\n\n def __init__(self, model, ax=None, k=10,\n metric=\"distortion\", timings=True, **kwargs):\n super(KElbowVisualizer, self).__init__(model, ax=ax, **kwargs)\n\n # Get the scoring method\n if metric not in KELBOW_SCOREMAP:\n raise YellowbrickValueError(\n \"'{}' is not a defined metric \"\n \"use one of distortion, silhouette, or calinski_harabaz\"\n )\n\n # Store the arguments\n self.scoring_metric = KELBOW_SCOREMAP[metric]\n self.timings = timings\n\n # Convert K into a tuple argument if an integer\n if isinstance(k, int):\n k = (2, k+1)\n\n # Expand k in to the values we will use, capturing exceptions\n try:\n k = tuple(k)\n self.k_values_ = list(range(*k))\n except:\n raise YellowbrickValueError((\n \"Specify a range or maximal K value, the value '{}' \"\n \"is not a valid argument for K.\".format(k)\n ))\n\n\n # Holds the values of the silhoutte scores\n self.k_scores_ = None\n\n def fit(self, X, y=None, **kwargs):\n \"\"\"\n Fits n KMeans models where n is the length of ``self.k_values_``,\n storing the silhoutte scores in the ``self.k_scores_`` attribute.\n This method finishes up by calling draw to create the plot.\n \"\"\"\n\n self.k_scores_ = []\n self.k_timers_ = []\n\n for k in self.k_values_:\n # Compute the start time for each model\n start = time.time()\n\n # Set the k value and fit the model\n self.estimator.set_params(n_clusters=k)\n self.estimator.fit(X)\n\n # Append the time and score to our plottable metrics\n self.k_timers_.append(time.time() - start)\n self.k_scores_.append(\n self.scoring_metric(X, self.estimator.labels_)\n )\n\n self.draw()\n\n return self\n\n def draw(self):\n \"\"\"\n Draw the elbow curve for the specified scores and values of K.\n \"\"\"\n # Plot the silhouette score against k\n self.ax.plot(self.k_values_, self.k_scores_, marker=\"D\", label=\"score\")\n\n # If we're going to plot the timings, create a twinx axis\n if self.timings:\n self.axes = [self.ax, self.ax.twinx()]\n self.axes[1].plot(\n self.k_values_, self.k_timers_, label=\"fit time\",\n c='g', marker=\"o\", linestyle=\"--\", alpha=0.75,\n )\n\n return self.ax\n\n def finalize(self):\n \"\"\"\n Prepare the figure for rendering by setting the title as well as the\n X and Y axis labels and adding the legend.\n \"\"\"\n # Get the metric name\n metric = self.scoring_metric.__name__.replace(\"_\", \" \").title()\n\n # Set the title\n self.set_title(\n '{} Elbow for {} Clustering'.format(metric, self.name)\n )\n\n # Set the x and y labels\n self.ax.set_xlabel('k')\n self.ax.set_ylabel(metric.lower())\n\n # Set the second y axis labels\n if self.timings:\n self.axes[1].grid(False)\n self.axes[1].set_ylabel(\"fit time (seconds)\", color='g')\n self.axes[1].tick_params('y', colors='g')\n", "path": "yellowbrick/cluster/elbow.py"}]}
4,083
301
gh_patches_debug_275
rasdani/github-patches
git_diff
sanic-org__sanic-878
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 0.5.5 release request Because 0.5.4 has actual protocol parsing problem (#755) I request to quickly release 0.5.5. It causes actual request loss and unhandlable 400 errors for the sanic users. (unless they make local patch for sanic) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sanic/__init__.py` Content: ``` 1 from sanic.app import Sanic 2 from sanic.blueprints import Blueprint 3 4 __version__ = '0.5.4' 5 6 __all__ = ['Sanic', 'Blueprint'] 7 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sanic/__init__.py b/sanic/__init__.py --- a/sanic/__init__.py +++ b/sanic/__init__.py @@ -1,6 +1,6 @@ from sanic.app import Sanic from sanic.blueprints import Blueprint -__version__ = '0.5.4' +__version__ = '0.6.0' __all__ = ['Sanic', 'Blueprint']
{"golden_diff": "diff --git a/sanic/__init__.py b/sanic/__init__.py\n--- a/sanic/__init__.py\n+++ b/sanic/__init__.py\n@@ -1,6 +1,6 @@\n from sanic.app import Sanic\n from sanic.blueprints import Blueprint\n \n-__version__ = '0.5.4'\n+__version__ = '0.6.0'\n \n __all__ = ['Sanic', 'Blueprint']\n", "issue": "0.5.5 release request\nBecause 0.5.4 has actual protocol parsing problem (#755) I request to quickly release 0.5.5.\r\n\r\nIt causes actual request loss and unhandlable 400 errors for the sanic users. (unless they make local patch for sanic)\n", "before_files": [{"content": "from sanic.app import Sanic\nfrom sanic.blueprints import Blueprint\n\n__version__ = '0.5.4'\n\n__all__ = ['Sanic', 'Blueprint']\n", "path": "sanic/__init__.py"}], "after_files": [{"content": "from sanic.app import Sanic\nfrom sanic.blueprints import Blueprint\n\n__version__ = '0.6.0'\n\n__all__ = ['Sanic', 'Blueprint']\n", "path": "sanic/__init__.py"}]}
376
98
gh_patches_debug_19326
rasdani/github-patches
git_diff
pwndbg__pwndbg-1779
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `telescope -r` should always display the input address `telescope -r addr` will skip displaying `addr` if the value at that address is equal to the previous few values: ``` pwndbg> p &target_address $1 = (unsigned long *) 0x5555555582a0 <target_address> pwndbg> telescope -r &target_address 00:0000│ 0x555555558208 (buf+360) ◂— 0x0 ... ↓ 4 skipped 05:0028│ rax 0x555555558230 (buf+400) ◂— 0x80 06:0030│ 0x555555558238 (buf+408) ◂— 0x0 ... ↓ 13 skipped ``` We should make this command always display the address that the user asked for: ``` pwndbg> telescope -r &target_address 00:0000│ 0x555555558208 (buf+360) ◂— 0x0 ... ↓ 4 skipped 05:0028│ rax 0x555555558230 (buf+400) ◂— 0x80 06:0030│ 0x555555558238 (buf+408) ◂— 0x0 ... ↓ 12 skipped 13:0098│ 0x5555555582a0 (target_address) ◂— 0x0 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pwndbg/commands/telescope.py` Content: ``` 1 """ 2 Prints out pointer chains starting at some address in memory. 3 4 Generally used to print out the stack or register values. 5 """ 6 7 import argparse 8 import collections 9 import math 10 from typing import List 11 12 import pwndbg.chain 13 import pwndbg.color.telescope as T 14 import pwndbg.commands 15 import pwndbg.gdblib.arch 16 import pwndbg.gdblib.config 17 import pwndbg.gdblib.memory 18 import pwndbg.gdblib.regs 19 import pwndbg.gdblib.typeinfo 20 from pwndbg.color import theme 21 from pwndbg.commands import CommandCategory 22 23 telescope_lines = pwndbg.gdblib.config.add_param( 24 "telescope-lines", 8, "number of lines to printed by the telescope command" 25 ) 26 skip_repeating_values = pwndbg.gdblib.config.add_param( 27 "telescope-skip-repeating-val", 28 True, 29 "whether to skip repeating values of the telescope command", 30 ) 31 skip_repeating_values_minimum = pwndbg.gdblib.config.add_param( 32 "telescope-skip-repeating-val-minimum", 33 3, 34 "minimum amount of repeated values before skipping lines", 35 ) 36 37 offset_separator = theme.add_param( 38 "telescope-offset-separator", "│", "offset separator of the telescope command" 39 ) 40 offset_delimiter = theme.add_param( 41 "telescope-offset-delimiter", ":", "offset delimiter of the telescope command" 42 ) 43 repeating_marker = theme.add_param( 44 "telescope-repeating-marker", "... ↓", "repeating values marker of the telescope command" 45 ) 46 47 48 parser = argparse.ArgumentParser( 49 description="Recursively dereferences pointers starting at the specified address." 50 ) 51 parser.add_argument( 52 "-r", 53 "--reverse", 54 dest="reverse", 55 action="store_true", 56 default=False, 57 help="Show <count> previous addresses instead of next ones", 58 ) 59 60 parser.add_argument( 61 "address", nargs="?", default="$sp", type=int, help="The address to telescope at." 62 ) 63 64 parser.add_argument( 65 "count", nargs="?", default=telescope_lines, type=int, help="The number of lines to show." 66 ) 67 68 69 @pwndbg.commands.ArgparsedCommand(parser, category=CommandCategory.MEMORY) 70 @pwndbg.commands.OnlyWhenRunning 71 def telescope(address=None, count=telescope_lines, to_string=False, reverse=False): 72 """ 73 Recursively dereferences pointers starting at the specified address 74 ($sp by default) 75 """ 76 ptrsize = pwndbg.gdblib.typeinfo.ptrsize 77 if telescope.repeat: 78 address = telescope.last_address + ptrsize 79 telescope.offset += 1 80 else: 81 telescope.offset = 0 82 83 address = int(address if address else pwndbg.gdblib.regs.sp) & pwndbg.gdblib.arch.ptrmask 84 count = max(int(count), 1) & pwndbg.gdblib.arch.ptrmask 85 delimiter = T.delimiter(offset_delimiter) 86 separator = T.separator(offset_separator) 87 88 # Allow invocation of "telescope 20" to dump 20 bytes at the stack pointer 89 if address < pwndbg.gdblib.memory.MMAP_MIN_ADDR and not pwndbg.gdblib.memory.peek(address): 90 count = address 91 address = pwndbg.gdblib.regs.sp 92 93 # Allow invocation of telescope -r to dump previous addresses 94 if reverse: 95 address -= (count - 1) * ptrsize 96 97 # Allow invocation of "telescope a b" to dump all bytes from A to B 98 if int(address) <= int(count): 99 # adjust count if it is an address. use ceil division as count is number of 100 # ptrsize values and we don't want to strip out a value if dest is unaligned 101 count -= address 102 count = max(math.ceil(count / ptrsize), 1) 103 104 reg_values = collections.defaultdict(lambda: []) 105 for reg in pwndbg.gdblib.regs.common: 106 reg_values[pwndbg.gdblib.regs[reg]].append(reg) 107 108 start = address 109 stop = address + (count * ptrsize) 110 step = ptrsize 111 112 # Find all registers which show up in the trace 113 regs = {} 114 for i in range(start, stop, step): 115 values = list(reg_values[i]) 116 117 for width in range(1, pwndbg.gdblib.arch.ptrsize): 118 values.extend("%s-%i" % (r, width) for r in reg_values[i + width]) 119 120 regs[i] = " ".join(values) 121 122 # Find the longest set of register information 123 if regs: 124 longest_regs = max(map(len, regs.values())) 125 else: 126 longest_regs = 0 127 128 # Print everything out 129 result = [] 130 last = None 131 collapse_buffer: List[str] = [] 132 skipped_padding = ( 133 2 134 + len(offset_delimiter) 135 + 4 136 + len(offset_separator) 137 + 1 138 + longest_regs 139 + 1 140 - len(repeating_marker) 141 ) 142 143 # Collapse repeating values exceeding minimum delta. 144 def collapse_repeating_values() -> None: 145 # The first line was already printed, hence increment by 1 146 if collapse_buffer and len(collapse_buffer) + 1 >= skip_repeating_values_minimum: 147 result.append( 148 T.repeating_marker( 149 "%s%s%i skipped" 150 % (repeating_marker, " " * skipped_padding, len(collapse_buffer)) 151 ) 152 ) 153 else: 154 result.extend(collapse_buffer) 155 collapse_buffer.clear() 156 157 for i, addr in enumerate(range(start, stop, step)): 158 if not pwndbg.gdblib.memory.peek(addr): 159 collapse_repeating_values() 160 result.append("<Could not read memory at %#x>" % addr) 161 break 162 163 line = " ".join( 164 ( 165 T.offset( 166 "%02x%s%04x%s" 167 % ( 168 i + telescope.offset, 169 delimiter, 170 addr - start + (telescope.offset * ptrsize), 171 separator, 172 ) 173 ), 174 T.register(regs[addr].ljust(longest_regs)), 175 pwndbg.chain.format(addr), 176 ) 177 ) 178 179 # Buffer repeating values. 180 if skip_repeating_values: 181 value = pwndbg.gdblib.memory.pvoid(addr) 182 if last == value: 183 collapse_buffer.append(line) 184 continue 185 collapse_repeating_values() 186 last = value 187 188 result.append(line) 189 190 collapse_repeating_values() 191 telescope.offset += i 192 telescope.last_address = addr 193 194 if not to_string: 195 print("\n".join(result)) 196 197 return result 198 199 200 parser = argparse.ArgumentParser( 201 description="Dereferences on stack data with specified count and offset." 202 ) 203 parser.add_argument("count", nargs="?", default=8, type=int, help="number of element to dump") 204 parser.add_argument( 205 "offset", 206 nargs="?", 207 default=0, 208 type=int, 209 help="Element offset from $sp (support negative offset)", 210 ) 211 212 213 @pwndbg.commands.ArgparsedCommand(parser, category=CommandCategory.STACK) 214 @pwndbg.commands.OnlyWhenRunning 215 def stack(count, offset) -> None: 216 ptrsize = pwndbg.gdblib.typeinfo.ptrsize 217 telescope.repeat = stack.repeat 218 telescope(address=pwndbg.gdblib.regs.sp + offset * ptrsize, count=count) 219 220 221 telescope.last_address = 0 222 telescope.offset = 0 223 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pwndbg/commands/telescope.py b/pwndbg/commands/telescope.py --- a/pwndbg/commands/telescope.py +++ b/pwndbg/commands/telescope.py @@ -81,6 +81,7 @@ telescope.offset = 0 address = int(address if address else pwndbg.gdblib.regs.sp) & pwndbg.gdblib.arch.ptrmask + input_address = address count = max(int(count), 1) & pwndbg.gdblib.arch.ptrmask delimiter = T.delimiter(offset_delimiter) separator = T.separator(offset_separator) @@ -179,7 +180,7 @@ # Buffer repeating values. if skip_repeating_values: value = pwndbg.gdblib.memory.pvoid(addr) - if last == value: + if last == value and addr != input_address: collapse_buffer.append(line) continue collapse_repeating_values()
{"golden_diff": "diff --git a/pwndbg/commands/telescope.py b/pwndbg/commands/telescope.py\n--- a/pwndbg/commands/telescope.py\n+++ b/pwndbg/commands/telescope.py\n@@ -81,6 +81,7 @@\n telescope.offset = 0\n \n address = int(address if address else pwndbg.gdblib.regs.sp) & pwndbg.gdblib.arch.ptrmask\n+ input_address = address\n count = max(int(count), 1) & pwndbg.gdblib.arch.ptrmask\n delimiter = T.delimiter(offset_delimiter)\n separator = T.separator(offset_separator)\n@@ -179,7 +180,7 @@\n # Buffer repeating values.\n if skip_repeating_values:\n value = pwndbg.gdblib.memory.pvoid(addr)\n- if last == value:\n+ if last == value and addr != input_address:\n collapse_buffer.append(line)\n continue\n collapse_repeating_values()\n", "issue": "`telescope -r` should always display the input address\n`telescope -r addr` will skip displaying `addr` if the value at that address is equal to the previous few values:\r\n```\r\npwndbg> p &target_address\r\n$1 = (unsigned long *) 0x5555555582a0 <target_address>\r\npwndbg> telescope -r &target_address\r\n00:0000\u2502 0x555555558208 (buf+360) \u25c2\u2014 0x0\r\n... \u2193 4 skipped\r\n05:0028\u2502 rax 0x555555558230 (buf+400) \u25c2\u2014 0x80\r\n06:0030\u2502 0x555555558238 (buf+408) \u25c2\u2014 0x0\r\n... \u2193 13 skipped\r\n```\r\nWe should make this command always display the address that the user asked for:\r\n```\r\npwndbg> telescope -r &target_address\r\n00:0000\u2502 0x555555558208 (buf+360) \u25c2\u2014 0x0\r\n... \u2193 4 skipped\r\n05:0028\u2502 rax 0x555555558230 (buf+400) \u25c2\u2014 0x80\r\n06:0030\u2502 0x555555558238 (buf+408) \u25c2\u2014 0x0\r\n... \u2193 12 skipped\r\n13:0098\u2502 0x5555555582a0 (target_address) \u25c2\u2014 0x0\r\n```\n", "before_files": [{"content": "\"\"\"\nPrints out pointer chains starting at some address in memory.\n\nGenerally used to print out the stack or register values.\n\"\"\"\n\nimport argparse\nimport collections\nimport math\nfrom typing import List\n\nimport pwndbg.chain\nimport pwndbg.color.telescope as T\nimport pwndbg.commands\nimport pwndbg.gdblib.arch\nimport pwndbg.gdblib.config\nimport pwndbg.gdblib.memory\nimport pwndbg.gdblib.regs\nimport pwndbg.gdblib.typeinfo\nfrom pwndbg.color import theme\nfrom pwndbg.commands import CommandCategory\n\ntelescope_lines = pwndbg.gdblib.config.add_param(\n \"telescope-lines\", 8, \"number of lines to printed by the telescope command\"\n)\nskip_repeating_values = pwndbg.gdblib.config.add_param(\n \"telescope-skip-repeating-val\",\n True,\n \"whether to skip repeating values of the telescope command\",\n)\nskip_repeating_values_minimum = pwndbg.gdblib.config.add_param(\n \"telescope-skip-repeating-val-minimum\",\n 3,\n \"minimum amount of repeated values before skipping lines\",\n)\n\noffset_separator = theme.add_param(\n \"telescope-offset-separator\", \"\u2502\", \"offset separator of the telescope command\"\n)\noffset_delimiter = theme.add_param(\n \"telescope-offset-delimiter\", \":\", \"offset delimiter of the telescope command\"\n)\nrepeating_marker = theme.add_param(\n \"telescope-repeating-marker\", \"... \u2193\", \"repeating values marker of the telescope command\"\n)\n\n\nparser = argparse.ArgumentParser(\n description=\"Recursively dereferences pointers starting at the specified address.\"\n)\nparser.add_argument(\n \"-r\",\n \"--reverse\",\n dest=\"reverse\",\n action=\"store_true\",\n default=False,\n help=\"Show <count> previous addresses instead of next ones\",\n)\n\nparser.add_argument(\n \"address\", nargs=\"?\", default=\"$sp\", type=int, help=\"The address to telescope at.\"\n)\n\nparser.add_argument(\n \"count\", nargs=\"?\", default=telescope_lines, type=int, help=\"The number of lines to show.\"\n)\n\n\[email protected](parser, category=CommandCategory.MEMORY)\[email protected]\ndef telescope(address=None, count=telescope_lines, to_string=False, reverse=False):\n \"\"\"\n Recursively dereferences pointers starting at the specified address\n ($sp by default)\n \"\"\"\n ptrsize = pwndbg.gdblib.typeinfo.ptrsize\n if telescope.repeat:\n address = telescope.last_address + ptrsize\n telescope.offset += 1\n else:\n telescope.offset = 0\n\n address = int(address if address else pwndbg.gdblib.regs.sp) & pwndbg.gdblib.arch.ptrmask\n count = max(int(count), 1) & pwndbg.gdblib.arch.ptrmask\n delimiter = T.delimiter(offset_delimiter)\n separator = T.separator(offset_separator)\n\n # Allow invocation of \"telescope 20\" to dump 20 bytes at the stack pointer\n if address < pwndbg.gdblib.memory.MMAP_MIN_ADDR and not pwndbg.gdblib.memory.peek(address):\n count = address\n address = pwndbg.gdblib.regs.sp\n\n # Allow invocation of telescope -r to dump previous addresses\n if reverse:\n address -= (count - 1) * ptrsize\n\n # Allow invocation of \"telescope a b\" to dump all bytes from A to B\n if int(address) <= int(count):\n # adjust count if it is an address. use ceil division as count is number of\n # ptrsize values and we don't want to strip out a value if dest is unaligned\n count -= address\n count = max(math.ceil(count / ptrsize), 1)\n\n reg_values = collections.defaultdict(lambda: [])\n for reg in pwndbg.gdblib.regs.common:\n reg_values[pwndbg.gdblib.regs[reg]].append(reg)\n\n start = address\n stop = address + (count * ptrsize)\n step = ptrsize\n\n # Find all registers which show up in the trace\n regs = {}\n for i in range(start, stop, step):\n values = list(reg_values[i])\n\n for width in range(1, pwndbg.gdblib.arch.ptrsize):\n values.extend(\"%s-%i\" % (r, width) for r in reg_values[i + width])\n\n regs[i] = \" \".join(values)\n\n # Find the longest set of register information\n if regs:\n longest_regs = max(map(len, regs.values()))\n else:\n longest_regs = 0\n\n # Print everything out\n result = []\n last = None\n collapse_buffer: List[str] = []\n skipped_padding = (\n 2\n + len(offset_delimiter)\n + 4\n + len(offset_separator)\n + 1\n + longest_regs\n + 1\n - len(repeating_marker)\n )\n\n # Collapse repeating values exceeding minimum delta.\n def collapse_repeating_values() -> None:\n # The first line was already printed, hence increment by 1\n if collapse_buffer and len(collapse_buffer) + 1 >= skip_repeating_values_minimum:\n result.append(\n T.repeating_marker(\n \"%s%s%i skipped\"\n % (repeating_marker, \" \" * skipped_padding, len(collapse_buffer))\n )\n )\n else:\n result.extend(collapse_buffer)\n collapse_buffer.clear()\n\n for i, addr in enumerate(range(start, stop, step)):\n if not pwndbg.gdblib.memory.peek(addr):\n collapse_repeating_values()\n result.append(\"<Could not read memory at %#x>\" % addr)\n break\n\n line = \" \".join(\n (\n T.offset(\n \"%02x%s%04x%s\"\n % (\n i + telescope.offset,\n delimiter,\n addr - start + (telescope.offset * ptrsize),\n separator,\n )\n ),\n T.register(regs[addr].ljust(longest_regs)),\n pwndbg.chain.format(addr),\n )\n )\n\n # Buffer repeating values.\n if skip_repeating_values:\n value = pwndbg.gdblib.memory.pvoid(addr)\n if last == value:\n collapse_buffer.append(line)\n continue\n collapse_repeating_values()\n last = value\n\n result.append(line)\n\n collapse_repeating_values()\n telescope.offset += i\n telescope.last_address = addr\n\n if not to_string:\n print(\"\\n\".join(result))\n\n return result\n\n\nparser = argparse.ArgumentParser(\n description=\"Dereferences on stack data with specified count and offset.\"\n)\nparser.add_argument(\"count\", nargs=\"?\", default=8, type=int, help=\"number of element to dump\")\nparser.add_argument(\n \"offset\",\n nargs=\"?\",\n default=0,\n type=int,\n help=\"Element offset from $sp (support negative offset)\",\n)\n\n\[email protected](parser, category=CommandCategory.STACK)\[email protected]\ndef stack(count, offset) -> None:\n ptrsize = pwndbg.gdblib.typeinfo.ptrsize\n telescope.repeat = stack.repeat\n telescope(address=pwndbg.gdblib.regs.sp + offset * ptrsize, count=count)\n\n\ntelescope.last_address = 0\ntelescope.offset = 0\n", "path": "pwndbg/commands/telescope.py"}], "after_files": [{"content": "\"\"\"\nPrints out pointer chains starting at some address in memory.\n\nGenerally used to print out the stack or register values.\n\"\"\"\n\nimport argparse\nimport collections\nimport math\nfrom typing import List\n\nimport pwndbg.chain\nimport pwndbg.color.telescope as T\nimport pwndbg.commands\nimport pwndbg.gdblib.arch\nimport pwndbg.gdblib.config\nimport pwndbg.gdblib.memory\nimport pwndbg.gdblib.regs\nimport pwndbg.gdblib.typeinfo\nfrom pwndbg.color import theme\nfrom pwndbg.commands import CommandCategory\n\ntelescope_lines = pwndbg.gdblib.config.add_param(\n \"telescope-lines\", 8, \"number of lines to printed by the telescope command\"\n)\nskip_repeating_values = pwndbg.gdblib.config.add_param(\n \"telescope-skip-repeating-val\",\n True,\n \"whether to skip repeating values of the telescope command\",\n)\nskip_repeating_values_minimum = pwndbg.gdblib.config.add_param(\n \"telescope-skip-repeating-val-minimum\",\n 3,\n \"minimum amount of repeated values before skipping lines\",\n)\n\noffset_separator = theme.add_param(\n \"telescope-offset-separator\", \"\u2502\", \"offset separator of the telescope command\"\n)\noffset_delimiter = theme.add_param(\n \"telescope-offset-delimiter\", \":\", \"offset delimiter of the telescope command\"\n)\nrepeating_marker = theme.add_param(\n \"telescope-repeating-marker\", \"... \u2193\", \"repeating values marker of the telescope command\"\n)\n\n\nparser = argparse.ArgumentParser(\n description=\"Recursively dereferences pointers starting at the specified address.\"\n)\nparser.add_argument(\n \"-r\",\n \"--reverse\",\n dest=\"reverse\",\n action=\"store_true\",\n default=False,\n help=\"Show <count> previous addresses instead of next ones\",\n)\n\nparser.add_argument(\n \"address\", nargs=\"?\", default=\"$sp\", type=int, help=\"The address to telescope at.\"\n)\n\nparser.add_argument(\n \"count\", nargs=\"?\", default=telescope_lines, type=int, help=\"The number of lines to show.\"\n)\n\n\[email protected](parser, category=CommandCategory.MEMORY)\[email protected]\ndef telescope(address=None, count=telescope_lines, to_string=False, reverse=False):\n \"\"\"\n Recursively dereferences pointers starting at the specified address\n ($sp by default)\n \"\"\"\n ptrsize = pwndbg.gdblib.typeinfo.ptrsize\n if telescope.repeat:\n address = telescope.last_address + ptrsize\n telescope.offset += 1\n else:\n telescope.offset = 0\n\n address = int(address if address else pwndbg.gdblib.regs.sp) & pwndbg.gdblib.arch.ptrmask\n input_address = address\n count = max(int(count), 1) & pwndbg.gdblib.arch.ptrmask\n delimiter = T.delimiter(offset_delimiter)\n separator = T.separator(offset_separator)\n\n # Allow invocation of \"telescope 20\" to dump 20 bytes at the stack pointer\n if address < pwndbg.gdblib.memory.MMAP_MIN_ADDR and not pwndbg.gdblib.memory.peek(address):\n count = address\n address = pwndbg.gdblib.regs.sp\n\n # Allow invocation of telescope -r to dump previous addresses\n if reverse:\n address -= (count - 1) * ptrsize\n\n # Allow invocation of \"telescope a b\" to dump all bytes from A to B\n if int(address) <= int(count):\n # adjust count if it is an address. use ceil division as count is number of\n # ptrsize values and we don't want to strip out a value if dest is unaligned\n count -= address\n count = max(math.ceil(count / ptrsize), 1)\n\n reg_values = collections.defaultdict(lambda: [])\n for reg in pwndbg.gdblib.regs.common:\n reg_values[pwndbg.gdblib.regs[reg]].append(reg)\n\n start = address\n stop = address + (count * ptrsize)\n step = ptrsize\n\n # Find all registers which show up in the trace\n regs = {}\n for i in range(start, stop, step):\n values = list(reg_values[i])\n\n for width in range(1, pwndbg.gdblib.arch.ptrsize):\n values.extend(\"%s-%i\" % (r, width) for r in reg_values[i + width])\n\n regs[i] = \" \".join(values)\n\n # Find the longest set of register information\n if regs:\n longest_regs = max(map(len, regs.values()))\n else:\n longest_regs = 0\n\n # Print everything out\n result = []\n last = None\n collapse_buffer: List[str] = []\n skipped_padding = (\n 2\n + len(offset_delimiter)\n + 4\n + len(offset_separator)\n + 1\n + longest_regs\n + 1\n - len(repeating_marker)\n )\n\n # Collapse repeating values exceeding minimum delta.\n def collapse_repeating_values() -> None:\n # The first line was already printed, hence increment by 1\n if collapse_buffer and len(collapse_buffer) + 1 >= skip_repeating_values_minimum:\n result.append(\n T.repeating_marker(\n \"%s%s%i skipped\"\n % (repeating_marker, \" \" * skipped_padding, len(collapse_buffer))\n )\n )\n else:\n result.extend(collapse_buffer)\n collapse_buffer.clear()\n\n for i, addr in enumerate(range(start, stop, step)):\n if not pwndbg.gdblib.memory.peek(addr):\n collapse_repeating_values()\n result.append(\"<Could not read memory at %#x>\" % addr)\n break\n\n line = \" \".join(\n (\n T.offset(\n \"%02x%s%04x%s\"\n % (\n i + telescope.offset,\n delimiter,\n addr - start + (telescope.offset * ptrsize),\n separator,\n )\n ),\n T.register(regs[addr].ljust(longest_regs)),\n pwndbg.chain.format(addr),\n )\n )\n\n # Buffer repeating values.\n if skip_repeating_values:\n value = pwndbg.gdblib.memory.pvoid(addr)\n if last == value and addr != input_address:\n collapse_buffer.append(line)\n continue\n collapse_repeating_values()\n last = value\n\n result.append(line)\n\n collapse_repeating_values()\n telescope.offset += i\n telescope.last_address = addr\n\n if not to_string:\n print(\"\\n\".join(result))\n\n return result\n\n\nparser = argparse.ArgumentParser(\n description=\"Dereferences on stack data with specified count and offset.\"\n)\nparser.add_argument(\"count\", nargs=\"?\", default=8, type=int, help=\"number of element to dump\")\nparser.add_argument(\n \"offset\",\n nargs=\"?\",\n default=0,\n type=int,\n help=\"Element offset from $sp (support negative offset)\",\n)\n\n\[email protected](parser, category=CommandCategory.STACK)\[email protected]\ndef stack(count, offset) -> None:\n ptrsize = pwndbg.gdblib.typeinfo.ptrsize\n telescope.repeat = stack.repeat\n telescope(address=pwndbg.gdblib.regs.sp + offset * ptrsize, count=count)\n\n\ntelescope.last_address = 0\ntelescope.offset = 0\n", "path": "pwndbg/commands/telescope.py"}]}
2,866
217
gh_patches_debug_29561
rasdani/github-patches
git_diff
quantumlib__Cirq-5777
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Printing Moment containing classical control fails **Description of the issue** A classically controlled op causes moment printing to fail. The op itself renders correctly, and a circuit containing the moment renders correctly. Just the moment itself fails to render. **How to reproduce the issue** ```python op = cirq.X(cirq.NamedQubit('q')).with_classical_controls('m') m = cirq.Moment(op) print(op) print(cirq.Circuit(m)) print(m) ``` ``` X(q).with_classical_controls(m) q: ───X─── ║ m: ═══^═══ ValueError: Wanted diagram info from cirq.ClassicallyControlledOperation(cirq.X(cirq.NamedQubit('q')), [cirq.KeyCondition(cirq.MeasurementKey(name='m'))]) for [cirq.NamedQubit('q')]) but got cirq.CircuitDiagramInfo(wire_symbols=('X', '^'), exponent=1.0, connected=True, exponent_qubit_index=1, auto_exponent_parens=True) cirq-core/cirq/protocols/circuit_diagram_info_protocol.py:345: ValueError ``` v0.15 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cirq-core/cirq/ops/classically_controlled_operation.py` Content: ``` 1 # Copyright 2021 The Cirq Developers 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 from typing import ( 15 AbstractSet, 16 Any, 17 Mapping, 18 Dict, 19 FrozenSet, 20 List, 21 Optional, 22 Sequence, 23 TYPE_CHECKING, 24 Tuple, 25 Union, 26 ) 27 28 import sympy 29 30 from cirq import protocols, value 31 from cirq.ops import raw_types 32 33 if TYPE_CHECKING: 34 import cirq 35 36 37 @value.value_equality 38 class ClassicallyControlledOperation(raw_types.Operation): 39 """Augments existing operations to be conditionally executed. 40 41 An operation that is classically controlled is executed iff all conditions 42 evaluate to True. Currently the only condition type is a measurement key. 43 A measurement key evaluates to True iff any qubit in the corresponding 44 measurement operation evaluated to a non-zero value. 45 46 This object is typically created via 47 `operation.with_classical_controls(*conditions)`. 48 """ 49 50 def __init__( 51 self, 52 sub_operation: 'cirq.Operation', 53 conditions: Sequence[Union[str, 'cirq.MeasurementKey', 'cirq.Condition', sympy.Basic]], 54 ): 55 """Initializes a `ClassicallyControlledOperation`. 56 57 Multiple consecutive `ClassicallyControlledOperation` layers are 58 squashed when possible, so one should not depend on a specific number 59 of layers. 60 61 Args: 62 sub_operation: The operation to gate with a classical control 63 condition. 64 conditions: A sequence of measurement keys, or strings that can be 65 parsed into measurement keys. 66 67 Raises: 68 ValueError: If an unsupported gate is being classically 69 controlled. 70 """ 71 if protocols.measurement_key_objs(sub_operation): 72 raise ValueError( 73 f'Cannot conditionally run operations with measurements: {sub_operation}' 74 ) 75 conditions = tuple(conditions) 76 if isinstance(sub_operation, ClassicallyControlledOperation): 77 conditions += sub_operation._conditions 78 sub_operation = sub_operation._sub_operation 79 conds: List['cirq.Condition'] = [] 80 for c in conditions: 81 if isinstance(c, str): 82 c = value.MeasurementKey.parse_serialized(c) 83 if isinstance(c, value.MeasurementKey): 84 c = value.KeyCondition(c) 85 if isinstance(c, sympy.Basic): 86 c = value.SympyCondition(c) 87 conds.append(c) 88 self._conditions: Tuple['cirq.Condition', ...] = tuple(conds) 89 self._sub_operation: 'cirq.Operation' = sub_operation 90 91 @property 92 def classical_controls(self) -> FrozenSet['cirq.Condition']: 93 return frozenset(self._conditions).union(self._sub_operation.classical_controls) 94 95 def without_classical_controls(self) -> 'cirq.Operation': 96 return self._sub_operation.without_classical_controls() 97 98 @property 99 def qubits(self): 100 return self._sub_operation.qubits 101 102 def with_qubits(self, *new_qubits): 103 return self._sub_operation.with_qubits(*new_qubits).with_classical_controls( 104 *self._conditions 105 ) 106 107 def _decompose_(self): 108 result = protocols.decompose_once(self._sub_operation, NotImplemented) 109 if result is NotImplemented: 110 return NotImplemented 111 112 return [ClassicallyControlledOperation(op, self._conditions) for op in result] 113 114 def _value_equality_values_(self): 115 return (frozenset(self._conditions), self._sub_operation) 116 117 def __str__(self) -> str: 118 keys = ', '.join(map(str, self._conditions)) 119 return f'{self._sub_operation}.with_classical_controls({keys})' 120 121 def __repr__(self): 122 return ( 123 f'cirq.ClassicallyControlledOperation(' 124 f'{self._sub_operation!r}, {list(self._conditions)!r})' 125 ) 126 127 def _is_parameterized_(self) -> bool: 128 return protocols.is_parameterized(self._sub_operation) 129 130 def _parameter_names_(self) -> AbstractSet[str]: 131 return protocols.parameter_names(self._sub_operation) 132 133 def _resolve_parameters_( 134 self, resolver: 'cirq.ParamResolver', recursive: bool 135 ) -> 'ClassicallyControlledOperation': 136 new_sub_op = protocols.resolve_parameters(self._sub_operation, resolver, recursive) 137 return ClassicallyControlledOperation(new_sub_op, self._conditions) 138 139 def _circuit_diagram_info_( 140 self, args: 'cirq.CircuitDiagramInfoArgs' 141 ) -> Optional['protocols.CircuitDiagramInfo']: 142 sub_args = protocols.CircuitDiagramInfoArgs( 143 known_qubit_count=args.known_qubit_count, 144 known_qubits=args.known_qubits, 145 use_unicode_characters=args.use_unicode_characters, 146 precision=args.precision, 147 label_map=args.label_map, 148 ) 149 sub_info = protocols.circuit_diagram_info(self._sub_operation, sub_args, None) 150 if sub_info is None: 151 return NotImplemented # coverage: ignore 152 control_count = len({k for c in self._conditions for k in c.keys}) 153 wire_symbols = sub_info.wire_symbols + ('^',) * control_count 154 if any(not isinstance(c, value.KeyCondition) for c in self._conditions): 155 wire_symbols = ( 156 wire_symbols[0] 157 + '(conditions=[' 158 + ', '.join(str(c) for c in self._conditions) 159 + '])', 160 ) + wire_symbols[1:] 161 exponent_qubit_index = None 162 if sub_info.exponent_qubit_index is not None: 163 exponent_qubit_index = sub_info.exponent_qubit_index + control_count 164 elif sub_info.exponent is not None: 165 exponent_qubit_index = control_count 166 return protocols.CircuitDiagramInfo( 167 wire_symbols=wire_symbols, 168 exponent=sub_info.exponent, 169 exponent_qubit_index=exponent_qubit_index, 170 ) 171 172 def _json_dict_(self) -> Dict[str, Any]: 173 return {'conditions': self._conditions, 'sub_operation': self._sub_operation} 174 175 def _act_on_(self, sim_state: 'cirq.SimulationStateBase') -> bool: 176 if all(c.resolve(sim_state.classical_data) for c in self._conditions): 177 protocols.act_on(self._sub_operation, sim_state) 178 return True 179 180 def _with_measurement_key_mapping_( 181 self, key_map: Mapping[str, str] 182 ) -> 'ClassicallyControlledOperation': 183 conditions = [protocols.with_measurement_key_mapping(c, key_map) for c in self._conditions] 184 sub_operation = protocols.with_measurement_key_mapping(self._sub_operation, key_map) 185 sub_operation = self._sub_operation if sub_operation is NotImplemented else sub_operation 186 return sub_operation.with_classical_controls(*conditions) 187 188 def _with_key_path_prefix_(self, prefix: Tuple[str, ...]) -> 'ClassicallyControlledOperation': 189 conditions = [protocols.with_key_path_prefix(c, prefix) for c in self._conditions] 190 sub_operation = protocols.with_key_path_prefix(self._sub_operation, prefix) 191 sub_operation = self._sub_operation if sub_operation is NotImplemented else sub_operation 192 return sub_operation.with_classical_controls(*conditions) 193 194 def _with_rescoped_keys_( 195 self, path: Tuple[str, ...], bindable_keys: FrozenSet['cirq.MeasurementKey'] 196 ) -> 'ClassicallyControlledOperation': 197 conds = [protocols.with_rescoped_keys(c, path, bindable_keys) for c in self._conditions] 198 sub_operation = protocols.with_rescoped_keys(self._sub_operation, path, bindable_keys) 199 return sub_operation.with_classical_controls(*conds) 200 201 def _control_keys_(self) -> FrozenSet['cirq.MeasurementKey']: 202 local_keys: FrozenSet['cirq.MeasurementKey'] = frozenset( 203 k for condition in self._conditions for k in condition.keys 204 ) 205 return local_keys.union(protocols.control_keys(self._sub_operation)) 206 207 def _qasm_(self, args: 'cirq.QasmArgs') -> Optional[str]: 208 args.validate_version('2.0') 209 if len(self._conditions) > 1: 210 raise ValueError('QASM does not support multiple conditions.') 211 subop_qasm = protocols.qasm(self._sub_operation, args=args) 212 if not self._conditions: 213 return subop_qasm 214 return f'if ({self._conditions[0].qasm}) {subop_qasm}' 215 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cirq-core/cirq/ops/classically_controlled_operation.py b/cirq-core/cirq/ops/classically_controlled_operation.py --- a/cirq-core/cirq/ops/classically_controlled_operation.py +++ b/cirq-core/cirq/ops/classically_controlled_operation.py @@ -149,9 +149,13 @@ sub_info = protocols.circuit_diagram_info(self._sub_operation, sub_args, None) if sub_info is None: return NotImplemented # coverage: ignore - control_count = len({k for c in self._conditions for k in c.keys}) - wire_symbols = sub_info.wire_symbols + ('^',) * control_count - if any(not isinstance(c, value.KeyCondition) for c in self._conditions): + control_label_count = 0 + if args.label_map is not None: + control_label_count = len({k for c in self._conditions for k in c.keys}) + wire_symbols = sub_info.wire_symbols + ('^',) * control_label_count + if control_label_count == 0 or any( + not isinstance(c, value.KeyCondition) for c in self._conditions + ): wire_symbols = ( wire_symbols[0] + '(conditions=[' @@ -160,9 +164,9 @@ ) + wire_symbols[1:] exponent_qubit_index = None if sub_info.exponent_qubit_index is not None: - exponent_qubit_index = sub_info.exponent_qubit_index + control_count + exponent_qubit_index = sub_info.exponent_qubit_index + control_label_count elif sub_info.exponent is not None: - exponent_qubit_index = control_count + exponent_qubit_index = control_label_count return protocols.CircuitDiagramInfo( wire_symbols=wire_symbols, exponent=sub_info.exponent,
{"golden_diff": "diff --git a/cirq-core/cirq/ops/classically_controlled_operation.py b/cirq-core/cirq/ops/classically_controlled_operation.py\n--- a/cirq-core/cirq/ops/classically_controlled_operation.py\n+++ b/cirq-core/cirq/ops/classically_controlled_operation.py\n@@ -149,9 +149,13 @@\n sub_info = protocols.circuit_diagram_info(self._sub_operation, sub_args, None)\n if sub_info is None:\n return NotImplemented # coverage: ignore\n- control_count = len({k for c in self._conditions for k in c.keys})\n- wire_symbols = sub_info.wire_symbols + ('^',) * control_count\n- if any(not isinstance(c, value.KeyCondition) for c in self._conditions):\n+ control_label_count = 0\n+ if args.label_map is not None:\n+ control_label_count = len({k for c in self._conditions for k in c.keys})\n+ wire_symbols = sub_info.wire_symbols + ('^',) * control_label_count\n+ if control_label_count == 0 or any(\n+ not isinstance(c, value.KeyCondition) for c in self._conditions\n+ ):\n wire_symbols = (\n wire_symbols[0]\n + '(conditions=['\n@@ -160,9 +164,9 @@\n ) + wire_symbols[1:]\n exponent_qubit_index = None\n if sub_info.exponent_qubit_index is not None:\n- exponent_qubit_index = sub_info.exponent_qubit_index + control_count\n+ exponent_qubit_index = sub_info.exponent_qubit_index + control_label_count\n elif sub_info.exponent is not None:\n- exponent_qubit_index = control_count\n+ exponent_qubit_index = control_label_count\n return protocols.CircuitDiagramInfo(\n wire_symbols=wire_symbols,\n exponent=sub_info.exponent,\n", "issue": "Printing Moment containing classical control fails\n**Description of the issue**\r\n\r\nA classically controlled op causes moment printing to fail. The op itself renders correctly, and a circuit containing the moment renders correctly. Just the moment itself fails to render.\r\n\r\n**How to reproduce the issue**\r\n\r\n```python\r\nop = cirq.X(cirq.NamedQubit('q')).with_classical_controls('m')\r\nm = cirq.Moment(op)\r\nprint(op)\r\nprint(cirq.Circuit(m))\r\nprint(m)\r\n```\r\n\r\n```\r\nX(q).with_classical_controls(m)\r\n\r\n\r\nq: \u2500\u2500\u2500X\u2500\u2500\u2500\r\n \u2551\r\nm: \u2550\u2550\u2550^\u2550\u2550\u2550\r\n\r\n\r\nValueError: Wanted diagram info from cirq.ClassicallyControlledOperation(cirq.X(cirq.NamedQubit('q')), [cirq.KeyCondition(cirq.MeasurementKey(name='m'))]) for [cirq.NamedQubit('q')]) but got cirq.CircuitDiagramInfo(wire_symbols=('X', '^'), exponent=1.0, connected=True, exponent_qubit_index=1, auto_exponent_parens=True)\r\n\r\ncirq-core/cirq/protocols/circuit_diagram_info_protocol.py:345: ValueError\r\n```\r\n\r\nv0.15\n", "before_files": [{"content": "# Copyright 2021 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import (\n AbstractSet,\n Any,\n Mapping,\n Dict,\n FrozenSet,\n List,\n Optional,\n Sequence,\n TYPE_CHECKING,\n Tuple,\n Union,\n)\n\nimport sympy\n\nfrom cirq import protocols, value\nfrom cirq.ops import raw_types\n\nif TYPE_CHECKING:\n import cirq\n\n\[email protected]_equality\nclass ClassicallyControlledOperation(raw_types.Operation):\n \"\"\"Augments existing operations to be conditionally executed.\n\n An operation that is classically controlled is executed iff all conditions\n evaluate to True. Currently the only condition type is a measurement key.\n A measurement key evaluates to True iff any qubit in the corresponding\n measurement operation evaluated to a non-zero value.\n\n This object is typically created via\n `operation.with_classical_controls(*conditions)`.\n \"\"\"\n\n def __init__(\n self,\n sub_operation: 'cirq.Operation',\n conditions: Sequence[Union[str, 'cirq.MeasurementKey', 'cirq.Condition', sympy.Basic]],\n ):\n \"\"\"Initializes a `ClassicallyControlledOperation`.\n\n Multiple consecutive `ClassicallyControlledOperation` layers are\n squashed when possible, so one should not depend on a specific number\n of layers.\n\n Args:\n sub_operation: The operation to gate with a classical control\n condition.\n conditions: A sequence of measurement keys, or strings that can be\n parsed into measurement keys.\n\n Raises:\n ValueError: If an unsupported gate is being classically\n controlled.\n \"\"\"\n if protocols.measurement_key_objs(sub_operation):\n raise ValueError(\n f'Cannot conditionally run operations with measurements: {sub_operation}'\n )\n conditions = tuple(conditions)\n if isinstance(sub_operation, ClassicallyControlledOperation):\n conditions += sub_operation._conditions\n sub_operation = sub_operation._sub_operation\n conds: List['cirq.Condition'] = []\n for c in conditions:\n if isinstance(c, str):\n c = value.MeasurementKey.parse_serialized(c)\n if isinstance(c, value.MeasurementKey):\n c = value.KeyCondition(c)\n if isinstance(c, sympy.Basic):\n c = value.SympyCondition(c)\n conds.append(c)\n self._conditions: Tuple['cirq.Condition', ...] = tuple(conds)\n self._sub_operation: 'cirq.Operation' = sub_operation\n\n @property\n def classical_controls(self) -> FrozenSet['cirq.Condition']:\n return frozenset(self._conditions).union(self._sub_operation.classical_controls)\n\n def without_classical_controls(self) -> 'cirq.Operation':\n return self._sub_operation.without_classical_controls()\n\n @property\n def qubits(self):\n return self._sub_operation.qubits\n\n def with_qubits(self, *new_qubits):\n return self._sub_operation.with_qubits(*new_qubits).with_classical_controls(\n *self._conditions\n )\n\n def _decompose_(self):\n result = protocols.decompose_once(self._sub_operation, NotImplemented)\n if result is NotImplemented:\n return NotImplemented\n\n return [ClassicallyControlledOperation(op, self._conditions) for op in result]\n\n def _value_equality_values_(self):\n return (frozenset(self._conditions), self._sub_operation)\n\n def __str__(self) -> str:\n keys = ', '.join(map(str, self._conditions))\n return f'{self._sub_operation}.with_classical_controls({keys})'\n\n def __repr__(self):\n return (\n f'cirq.ClassicallyControlledOperation('\n f'{self._sub_operation!r}, {list(self._conditions)!r})'\n )\n\n def _is_parameterized_(self) -> bool:\n return protocols.is_parameterized(self._sub_operation)\n\n def _parameter_names_(self) -> AbstractSet[str]:\n return protocols.parameter_names(self._sub_operation)\n\n def _resolve_parameters_(\n self, resolver: 'cirq.ParamResolver', recursive: bool\n ) -> 'ClassicallyControlledOperation':\n new_sub_op = protocols.resolve_parameters(self._sub_operation, resolver, recursive)\n return ClassicallyControlledOperation(new_sub_op, self._conditions)\n\n def _circuit_diagram_info_(\n self, args: 'cirq.CircuitDiagramInfoArgs'\n ) -> Optional['protocols.CircuitDiagramInfo']:\n sub_args = protocols.CircuitDiagramInfoArgs(\n known_qubit_count=args.known_qubit_count,\n known_qubits=args.known_qubits,\n use_unicode_characters=args.use_unicode_characters,\n precision=args.precision,\n label_map=args.label_map,\n )\n sub_info = protocols.circuit_diagram_info(self._sub_operation, sub_args, None)\n if sub_info is None:\n return NotImplemented # coverage: ignore\n control_count = len({k for c in self._conditions for k in c.keys})\n wire_symbols = sub_info.wire_symbols + ('^',) * control_count\n if any(not isinstance(c, value.KeyCondition) for c in self._conditions):\n wire_symbols = (\n wire_symbols[0]\n + '(conditions=['\n + ', '.join(str(c) for c in self._conditions)\n + '])',\n ) + wire_symbols[1:]\n exponent_qubit_index = None\n if sub_info.exponent_qubit_index is not None:\n exponent_qubit_index = sub_info.exponent_qubit_index + control_count\n elif sub_info.exponent is not None:\n exponent_qubit_index = control_count\n return protocols.CircuitDiagramInfo(\n wire_symbols=wire_symbols,\n exponent=sub_info.exponent,\n exponent_qubit_index=exponent_qubit_index,\n )\n\n def _json_dict_(self) -> Dict[str, Any]:\n return {'conditions': self._conditions, 'sub_operation': self._sub_operation}\n\n def _act_on_(self, sim_state: 'cirq.SimulationStateBase') -> bool:\n if all(c.resolve(sim_state.classical_data) for c in self._conditions):\n protocols.act_on(self._sub_operation, sim_state)\n return True\n\n def _with_measurement_key_mapping_(\n self, key_map: Mapping[str, str]\n ) -> 'ClassicallyControlledOperation':\n conditions = [protocols.with_measurement_key_mapping(c, key_map) for c in self._conditions]\n sub_operation = protocols.with_measurement_key_mapping(self._sub_operation, key_map)\n sub_operation = self._sub_operation if sub_operation is NotImplemented else sub_operation\n return sub_operation.with_classical_controls(*conditions)\n\n def _with_key_path_prefix_(self, prefix: Tuple[str, ...]) -> 'ClassicallyControlledOperation':\n conditions = [protocols.with_key_path_prefix(c, prefix) for c in self._conditions]\n sub_operation = protocols.with_key_path_prefix(self._sub_operation, prefix)\n sub_operation = self._sub_operation if sub_operation is NotImplemented else sub_operation\n return sub_operation.with_classical_controls(*conditions)\n\n def _with_rescoped_keys_(\n self, path: Tuple[str, ...], bindable_keys: FrozenSet['cirq.MeasurementKey']\n ) -> 'ClassicallyControlledOperation':\n conds = [protocols.with_rescoped_keys(c, path, bindable_keys) for c in self._conditions]\n sub_operation = protocols.with_rescoped_keys(self._sub_operation, path, bindable_keys)\n return sub_operation.with_classical_controls(*conds)\n\n def _control_keys_(self) -> FrozenSet['cirq.MeasurementKey']:\n local_keys: FrozenSet['cirq.MeasurementKey'] = frozenset(\n k for condition in self._conditions for k in condition.keys\n )\n return local_keys.union(protocols.control_keys(self._sub_operation))\n\n def _qasm_(self, args: 'cirq.QasmArgs') -> Optional[str]:\n args.validate_version('2.0')\n if len(self._conditions) > 1:\n raise ValueError('QASM does not support multiple conditions.')\n subop_qasm = protocols.qasm(self._sub_operation, args=args)\n if not self._conditions:\n return subop_qasm\n return f'if ({self._conditions[0].qasm}) {subop_qasm}'\n", "path": "cirq-core/cirq/ops/classically_controlled_operation.py"}], "after_files": [{"content": "# Copyright 2021 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import (\n AbstractSet,\n Any,\n Mapping,\n Dict,\n FrozenSet,\n List,\n Optional,\n Sequence,\n TYPE_CHECKING,\n Tuple,\n Union,\n)\n\nimport sympy\n\nfrom cirq import protocols, value\nfrom cirq.ops import raw_types\n\nif TYPE_CHECKING:\n import cirq\n\n\[email protected]_equality\nclass ClassicallyControlledOperation(raw_types.Operation):\n \"\"\"Augments existing operations to be conditionally executed.\n\n An operation that is classically controlled is executed iff all conditions\n evaluate to True. Currently the only condition type is a measurement key.\n A measurement key evaluates to True iff any qubit in the corresponding\n measurement operation evaluated to a non-zero value.\n\n This object is typically created via\n `operation.with_classical_controls(*conditions)`.\n \"\"\"\n\n def __init__(\n self,\n sub_operation: 'cirq.Operation',\n conditions: Sequence[Union[str, 'cirq.MeasurementKey', 'cirq.Condition', sympy.Basic]],\n ):\n \"\"\"Initializes a `ClassicallyControlledOperation`.\n\n Multiple consecutive `ClassicallyControlledOperation` layers are\n squashed when possible, so one should not depend on a specific number\n of layers.\n\n Args:\n sub_operation: The operation to gate with a classical control\n condition.\n conditions: A sequence of measurement keys, or strings that can be\n parsed into measurement keys.\n\n Raises:\n ValueError: If an unsupported gate is being classically\n controlled.\n \"\"\"\n if protocols.measurement_key_objs(sub_operation):\n raise ValueError(\n f'Cannot conditionally run operations with measurements: {sub_operation}'\n )\n conditions = tuple(conditions)\n if isinstance(sub_operation, ClassicallyControlledOperation):\n conditions += sub_operation._conditions\n sub_operation = sub_operation._sub_operation\n conds: List['cirq.Condition'] = []\n for c in conditions:\n if isinstance(c, str):\n c = value.MeasurementKey.parse_serialized(c)\n if isinstance(c, value.MeasurementKey):\n c = value.KeyCondition(c)\n if isinstance(c, sympy.Basic):\n c = value.SympyCondition(c)\n conds.append(c)\n self._conditions: Tuple['cirq.Condition', ...] = tuple(conds)\n self._sub_operation: 'cirq.Operation' = sub_operation\n\n @property\n def classical_controls(self) -> FrozenSet['cirq.Condition']:\n return frozenset(self._conditions).union(self._sub_operation.classical_controls)\n\n def without_classical_controls(self) -> 'cirq.Operation':\n return self._sub_operation.without_classical_controls()\n\n @property\n def qubits(self):\n return self._sub_operation.qubits\n\n def with_qubits(self, *new_qubits):\n return self._sub_operation.with_qubits(*new_qubits).with_classical_controls(\n *self._conditions\n )\n\n def _decompose_(self):\n result = protocols.decompose_once(self._sub_operation, NotImplemented)\n if result is NotImplemented:\n return NotImplemented\n\n return [ClassicallyControlledOperation(op, self._conditions) for op in result]\n\n def _value_equality_values_(self):\n return (frozenset(self._conditions), self._sub_operation)\n\n def __str__(self) -> str:\n keys = ', '.join(map(str, self._conditions))\n return f'{self._sub_operation}.with_classical_controls({keys})'\n\n def __repr__(self):\n return (\n f'cirq.ClassicallyControlledOperation('\n f'{self._sub_operation!r}, {list(self._conditions)!r})'\n )\n\n def _is_parameterized_(self) -> bool:\n return protocols.is_parameterized(self._sub_operation)\n\n def _parameter_names_(self) -> AbstractSet[str]:\n return protocols.parameter_names(self._sub_operation)\n\n def _resolve_parameters_(\n self, resolver: 'cirq.ParamResolver', recursive: bool\n ) -> 'ClassicallyControlledOperation':\n new_sub_op = protocols.resolve_parameters(self._sub_operation, resolver, recursive)\n return ClassicallyControlledOperation(new_sub_op, self._conditions)\n\n def _circuit_diagram_info_(\n self, args: 'cirq.CircuitDiagramInfoArgs'\n ) -> Optional['protocols.CircuitDiagramInfo']:\n sub_args = protocols.CircuitDiagramInfoArgs(\n known_qubit_count=args.known_qubit_count,\n known_qubits=args.known_qubits,\n use_unicode_characters=args.use_unicode_characters,\n precision=args.precision,\n label_map=args.label_map,\n )\n sub_info = protocols.circuit_diagram_info(self._sub_operation, sub_args, None)\n if sub_info is None:\n return NotImplemented # coverage: ignore\n control_label_count = 0\n if args.label_map is not None:\n control_label_count = len({k for c in self._conditions for k in c.keys})\n wire_symbols = sub_info.wire_symbols + ('^',) * control_label_count\n if control_label_count == 0 or any(\n not isinstance(c, value.KeyCondition) for c in self._conditions\n ):\n wire_symbols = (\n wire_symbols[0]\n + '(conditions=['\n + ', '.join(str(c) for c in self._conditions)\n + '])',\n ) + wire_symbols[1:]\n exponent_qubit_index = None\n if sub_info.exponent_qubit_index is not None:\n exponent_qubit_index = sub_info.exponent_qubit_index + control_label_count\n elif sub_info.exponent is not None:\n exponent_qubit_index = control_label_count\n return protocols.CircuitDiagramInfo(\n wire_symbols=wire_symbols,\n exponent=sub_info.exponent,\n exponent_qubit_index=exponent_qubit_index,\n )\n\n def _json_dict_(self) -> Dict[str, Any]:\n return {'conditions': self._conditions, 'sub_operation': self._sub_operation}\n\n def _act_on_(self, sim_state: 'cirq.SimulationStateBase') -> bool:\n if all(c.resolve(sim_state.classical_data) for c in self._conditions):\n protocols.act_on(self._sub_operation, sim_state)\n return True\n\n def _with_measurement_key_mapping_(\n self, key_map: Mapping[str, str]\n ) -> 'ClassicallyControlledOperation':\n conditions = [protocols.with_measurement_key_mapping(c, key_map) for c in self._conditions]\n sub_operation = protocols.with_measurement_key_mapping(self._sub_operation, key_map)\n sub_operation = self._sub_operation if sub_operation is NotImplemented else sub_operation\n return sub_operation.with_classical_controls(*conditions)\n\n def _with_key_path_prefix_(self, prefix: Tuple[str, ...]) -> 'ClassicallyControlledOperation':\n conditions = [protocols.with_key_path_prefix(c, prefix) for c in self._conditions]\n sub_operation = protocols.with_key_path_prefix(self._sub_operation, prefix)\n sub_operation = self._sub_operation if sub_operation is NotImplemented else sub_operation\n return sub_operation.with_classical_controls(*conditions)\n\n def _with_rescoped_keys_(\n self, path: Tuple[str, ...], bindable_keys: FrozenSet['cirq.MeasurementKey']\n ) -> 'ClassicallyControlledOperation':\n conds = [protocols.with_rescoped_keys(c, path, bindable_keys) for c in self._conditions]\n sub_operation = protocols.with_rescoped_keys(self._sub_operation, path, bindable_keys)\n return sub_operation.with_classical_controls(*conds)\n\n def _control_keys_(self) -> FrozenSet['cirq.MeasurementKey']:\n local_keys: FrozenSet['cirq.MeasurementKey'] = frozenset(\n k for condition in self._conditions for k in condition.keys\n )\n return local_keys.union(protocols.control_keys(self._sub_operation))\n\n def _qasm_(self, args: 'cirq.QasmArgs') -> Optional[str]:\n args.validate_version('2.0')\n if len(self._conditions) > 1:\n raise ValueError('QASM does not support multiple conditions.')\n subop_qasm = protocols.qasm(self._sub_operation, args=args)\n if not self._conditions:\n return subop_qasm\n return f'if ({self._conditions[0].qasm}) {subop_qasm}'\n", "path": "cirq-core/cirq/ops/classically_controlled_operation.py"}]}
2,982
415
gh_patches_debug_35118
rasdani/github-patches
git_diff
unionai-oss__pandera-1595
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add a polars `Series` type **Is your feature request related to a problem? Please describe.** This would provide a similar `pandera.typing.Series` type for the polars API that exists for pandas and other backends. It is not strictly necessary since it does not get checked, but it would be good to have to match the existing API and to help type checkers understand accessing string column names from the class attribute names. **Describe the solution you'd like** Allow for annotating polars data frame fields as: ```python import pandera.polars as pa from pandera.typing.polars import Series class MySchema(pa.DataFrameModel): a: Series[int] ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pandera/api/polars/model.py` Content: ``` 1 """Class-based api for polars models.""" 2 3 from typing import ( 4 Dict, 5 List, 6 Tuple, 7 Type, 8 ) 9 10 import pandas as pd 11 import polars as pl 12 from pandera.api.checks import Check 13 from pandera.api.dataframe.model import ( 14 DataFrameModel as _DataFrameModel, 15 get_dtype_kwargs, 16 ) 17 from pandera.api.dataframe.model_components import FieldInfo 18 from pandera.api.polars.container import DataFrameSchema 19 from pandera.api.polars.components import Column 20 from pandera.api.polars.model_config import BaseConfig 21 from pandera.engines import polars_engine as pe 22 from pandera.errors import SchemaInitError 23 from pandera.typing import AnnotationInfo 24 25 26 class DataFrameModel(_DataFrameModel[pl.LazyFrame, DataFrameSchema]): 27 """Model of a polars :class:`~pandera.api.pandas.container.DataFrameSchema`. 28 29 See the :ref:`User Guide <dataframe-models>` for more. 30 """ 31 32 Config: Type[BaseConfig] = BaseConfig 33 34 @classmethod 35 def build_schema_(cls, **kwargs): 36 return DataFrameSchema( 37 cls._build_columns(cls.__fields__, cls.__checks__), 38 checks=cls.__root_checks__, 39 **kwargs, 40 ) 41 42 @classmethod 43 def _build_columns( # pylint:disable=too-many-locals 44 cls, 45 fields: Dict[str, Tuple[AnnotationInfo, FieldInfo]], 46 checks: Dict[str, List[Check]], 47 ) -> Dict[str, Column]: 48 49 columns: Dict[str, Column] = {} 50 for field_name, (annotation, field) in fields.items(): 51 field_checks = checks.get(field_name, []) 52 field_name = field.name 53 check_name = getattr(field, "check_name", None) 54 55 engine_dtype = None 56 try: 57 engine_dtype = pe.Engine.dtype(annotation.raw_annotation) 58 dtype = engine_dtype.type 59 except TypeError as exc: 60 if annotation.metadata: 61 if field.dtype_kwargs: 62 raise TypeError( 63 "Cannot specify redundant 'dtype_kwargs' " 64 + f"for {annotation.raw_annotation}." 65 + "\n Usage Tip: Drop 'typing.Annotated'." 66 ) from exc 67 dtype_kwargs = get_dtype_kwargs(annotation) 68 dtype = annotation.arg(**dtype_kwargs) # type: ignore 69 elif annotation.default_dtype: 70 dtype = annotation.default_dtype 71 else: 72 dtype = annotation.arg 73 74 if ( 75 annotation.origin is None 76 or isinstance(annotation.origin, pl.datatypes.DataTypeClass) 77 or engine_dtype 78 ): 79 if check_name is False: 80 raise SchemaInitError( 81 f"'check_name' is not supported for {field_name}." 82 ) 83 84 column_kwargs = ( 85 field.column_properties( 86 dtype, 87 required=not annotation.optional, 88 checks=field_checks, 89 name=field_name, 90 ) 91 if field 92 else {} 93 ) 94 columns[field_name] = Column(**column_kwargs) 95 96 else: 97 origin_name = ( 98 f"{annotation.origin.__module__}." 99 f"{annotation.origin.__name__}" 100 ) 101 msg = ( 102 " Series[TYPE] annotations are not supported for polars. " 103 "Use the bare TYPE directly" 104 if origin_name == "pandera.typing.pandas.Series" 105 else "" 106 ) 107 raise SchemaInitError( 108 f"Invalid annotation '{field_name}: " 109 f"{annotation.raw_annotation}'.{msg}" 110 ) 111 112 return columns 113 114 @classmethod 115 def to_json_schema(cls): 116 """Serialize schema metadata into json-schema format. 117 118 :param dataframe_schema: schema to write to json-schema format. 119 120 .. note:: 121 122 This function is currently does not fully specify a pandera schema, 123 and is primarily used internally to render OpenAPI docs via the 124 FastAPI integration. 125 """ 126 schema = cls.to_schema() 127 empty = pd.DataFrame(columns=schema.columns.keys()).astype( 128 {k: v.type for k, v in schema.dtypes.items()} 129 ) 130 table_schema = pd.io.json.build_table_schema(empty) 131 132 def _field_json_schema(field): 133 return { 134 "type": "array", 135 "items": {"type": field["type"]}, 136 } 137 138 return { 139 "title": schema.name or "pandera.DataFrameSchema", 140 "type": "object", 141 "properties": { 142 field["name"]: _field_json_schema(field) 143 for field in table_schema["fields"] 144 }, 145 } 146 ``` Path: `pandera/typing/polars.py` Content: ``` 1 """Pandera type annotations for Polars.""" 2 3 from typing import TYPE_CHECKING, Generic, TypeVar 4 5 from packaging import version 6 7 from pandera.typing.common import DataFrameBase, DataFrameModel 8 9 10 try: 11 import polars as pl 12 13 POLARS_INSTALLED = True 14 except ImportError: 15 POLARS_INSTALLED = False 16 17 18 def polars_version(): 19 """Return the modin version.""" 20 return version.parse(pl.__version__) 21 22 23 # pylint:disable=invalid-name 24 if TYPE_CHECKING: 25 T = TypeVar("T") # pragma: no cover 26 else: 27 T = DataFrameModel 28 29 30 if POLARS_INSTALLED: 31 # pylint: disable=too-few-public-methods 32 class LazyFrame(DataFrameBase, pl.LazyFrame, Generic[T]): 33 """ 34 Pandera generic for pl.LazyFrame, only used for type annotation. 35 36 *new in 0.19.0* 37 """ 38 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pandera/api/polars/model.py b/pandera/api/polars/model.py --- a/pandera/api/polars/model.py +++ b/pandera/api/polars/model.py @@ -21,6 +21,7 @@ from pandera.engines import polars_engine as pe from pandera.errors import SchemaInitError from pandera.typing import AnnotationInfo +from pandera.typing.polars import Series class DataFrameModel(_DataFrameModel[pl.LazyFrame, DataFrameSchema]): @@ -74,6 +75,7 @@ if ( annotation.origin is None or isinstance(annotation.origin, pl.datatypes.DataTypeClass) + or annotation.origin is Series or engine_dtype ): if check_name is False: @@ -94,19 +96,9 @@ columns[field_name] = Column(**column_kwargs) else: - origin_name = ( - f"{annotation.origin.__module__}." - f"{annotation.origin.__name__}" - ) - msg = ( - " Series[TYPE] annotations are not supported for polars. " - "Use the bare TYPE directly" - if origin_name == "pandera.typing.pandas.Series" - else "" - ) raise SchemaInitError( f"Invalid annotation '{field_name}: " - f"{annotation.raw_annotation}'.{msg}" + f"{annotation.raw_annotation}'." ) return columns diff --git a/pandera/typing/polars.py b/pandera/typing/polars.py --- a/pandera/typing/polars.py +++ b/pandera/typing/polars.py @@ -4,7 +4,11 @@ from packaging import version -from pandera.typing.common import DataFrameBase, DataFrameModel +from pandera.typing.common import ( + DataFrameBase, + DataFrameModel, + SeriesBase, +) try: @@ -35,3 +39,11 @@ *new in 0.19.0* """ + + # pylint: disable=too-few-public-methods + class Series(SeriesBase, pl.Series, Generic[T]): + """ + Pandera generic for pl.Series, only used for type annotation. + + *new in 0.19.0* + """
{"golden_diff": "diff --git a/pandera/api/polars/model.py b/pandera/api/polars/model.py\n--- a/pandera/api/polars/model.py\n+++ b/pandera/api/polars/model.py\n@@ -21,6 +21,7 @@\n from pandera.engines import polars_engine as pe\n from pandera.errors import SchemaInitError\n from pandera.typing import AnnotationInfo\n+from pandera.typing.polars import Series\n \n \n class DataFrameModel(_DataFrameModel[pl.LazyFrame, DataFrameSchema]):\n@@ -74,6 +75,7 @@\n if (\n annotation.origin is None\n or isinstance(annotation.origin, pl.datatypes.DataTypeClass)\n+ or annotation.origin is Series\n or engine_dtype\n ):\n if check_name is False:\n@@ -94,19 +96,9 @@\n columns[field_name] = Column(**column_kwargs)\n \n else:\n- origin_name = (\n- f\"{annotation.origin.__module__}.\"\n- f\"{annotation.origin.__name__}\"\n- )\n- msg = (\n- \" Series[TYPE] annotations are not supported for polars. \"\n- \"Use the bare TYPE directly\"\n- if origin_name == \"pandera.typing.pandas.Series\"\n- else \"\"\n- )\n raise SchemaInitError(\n f\"Invalid annotation '{field_name}: \"\n- f\"{annotation.raw_annotation}'.{msg}\"\n+ f\"{annotation.raw_annotation}'.\"\n )\n \n return columns\ndiff --git a/pandera/typing/polars.py b/pandera/typing/polars.py\n--- a/pandera/typing/polars.py\n+++ b/pandera/typing/polars.py\n@@ -4,7 +4,11 @@\n \n from packaging import version\n \n-from pandera.typing.common import DataFrameBase, DataFrameModel\n+from pandera.typing.common import (\n+ DataFrameBase,\n+ DataFrameModel,\n+ SeriesBase,\n+)\n \n \n try:\n@@ -35,3 +39,11 @@\n \n *new in 0.19.0*\n \"\"\"\n+\n+ # pylint: disable=too-few-public-methods\n+ class Series(SeriesBase, pl.Series, Generic[T]):\n+ \"\"\"\n+ Pandera generic for pl.Series, only used for type annotation.\n+\n+ *new in 0.19.0*\n+ \"\"\"\n", "issue": "Add a polars `Series` type\n**Is your feature request related to a problem? Please describe.**\r\nThis would provide a similar `pandera.typing.Series` type for the polars API that exists for pandas and other backends. It is not strictly necessary since it does not get checked, but it would be good to have to match the existing API and to help type checkers understand accessing string column names from the class attribute names.\r\n\r\n**Describe the solution you'd like**\r\nAllow for annotating polars data frame fields as:\r\n```python\r\nimport pandera.polars as pa\r\nfrom pandera.typing.polars import Series\r\n\r\nclass MySchema(pa.DataFrameModel):\r\n a: Series[int]\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Class-based api for polars models.\"\"\"\n\nfrom typing import (\n Dict,\n List,\n Tuple,\n Type,\n)\n\nimport pandas as pd\nimport polars as pl\nfrom pandera.api.checks import Check\nfrom pandera.api.dataframe.model import (\n DataFrameModel as _DataFrameModel,\n get_dtype_kwargs,\n)\nfrom pandera.api.dataframe.model_components import FieldInfo\nfrom pandera.api.polars.container import DataFrameSchema\nfrom pandera.api.polars.components import Column\nfrom pandera.api.polars.model_config import BaseConfig\nfrom pandera.engines import polars_engine as pe\nfrom pandera.errors import SchemaInitError\nfrom pandera.typing import AnnotationInfo\n\n\nclass DataFrameModel(_DataFrameModel[pl.LazyFrame, DataFrameSchema]):\n \"\"\"Model of a polars :class:`~pandera.api.pandas.container.DataFrameSchema`.\n\n See the :ref:`User Guide <dataframe-models>` for more.\n \"\"\"\n\n Config: Type[BaseConfig] = BaseConfig\n\n @classmethod\n def build_schema_(cls, **kwargs):\n return DataFrameSchema(\n cls._build_columns(cls.__fields__, cls.__checks__),\n checks=cls.__root_checks__,\n **kwargs,\n )\n\n @classmethod\n def _build_columns( # pylint:disable=too-many-locals\n cls,\n fields: Dict[str, Tuple[AnnotationInfo, FieldInfo]],\n checks: Dict[str, List[Check]],\n ) -> Dict[str, Column]:\n\n columns: Dict[str, Column] = {}\n for field_name, (annotation, field) in fields.items():\n field_checks = checks.get(field_name, [])\n field_name = field.name\n check_name = getattr(field, \"check_name\", None)\n\n engine_dtype = None\n try:\n engine_dtype = pe.Engine.dtype(annotation.raw_annotation)\n dtype = engine_dtype.type\n except TypeError as exc:\n if annotation.metadata:\n if field.dtype_kwargs:\n raise TypeError(\n \"Cannot specify redundant 'dtype_kwargs' \"\n + f\"for {annotation.raw_annotation}.\"\n + \"\\n Usage Tip: Drop 'typing.Annotated'.\"\n ) from exc\n dtype_kwargs = get_dtype_kwargs(annotation)\n dtype = annotation.arg(**dtype_kwargs) # type: ignore\n elif annotation.default_dtype:\n dtype = annotation.default_dtype\n else:\n dtype = annotation.arg\n\n if (\n annotation.origin is None\n or isinstance(annotation.origin, pl.datatypes.DataTypeClass)\n or engine_dtype\n ):\n if check_name is False:\n raise SchemaInitError(\n f\"'check_name' is not supported for {field_name}.\"\n )\n\n column_kwargs = (\n field.column_properties(\n dtype,\n required=not annotation.optional,\n checks=field_checks,\n name=field_name,\n )\n if field\n else {}\n )\n columns[field_name] = Column(**column_kwargs)\n\n else:\n origin_name = (\n f\"{annotation.origin.__module__}.\"\n f\"{annotation.origin.__name__}\"\n )\n msg = (\n \" Series[TYPE] annotations are not supported for polars. \"\n \"Use the bare TYPE directly\"\n if origin_name == \"pandera.typing.pandas.Series\"\n else \"\"\n )\n raise SchemaInitError(\n f\"Invalid annotation '{field_name}: \"\n f\"{annotation.raw_annotation}'.{msg}\"\n )\n\n return columns\n\n @classmethod\n def to_json_schema(cls):\n \"\"\"Serialize schema metadata into json-schema format.\n\n :param dataframe_schema: schema to write to json-schema format.\n\n .. note::\n\n This function is currently does not fully specify a pandera schema,\n and is primarily used internally to render OpenAPI docs via the\n FastAPI integration.\n \"\"\"\n schema = cls.to_schema()\n empty = pd.DataFrame(columns=schema.columns.keys()).astype(\n {k: v.type for k, v in schema.dtypes.items()}\n )\n table_schema = pd.io.json.build_table_schema(empty)\n\n def _field_json_schema(field):\n return {\n \"type\": \"array\",\n \"items\": {\"type\": field[\"type\"]},\n }\n\n return {\n \"title\": schema.name or \"pandera.DataFrameSchema\",\n \"type\": \"object\",\n \"properties\": {\n field[\"name\"]: _field_json_schema(field)\n for field in table_schema[\"fields\"]\n },\n }\n", "path": "pandera/api/polars/model.py"}, {"content": "\"\"\"Pandera type annotations for Polars.\"\"\"\n\nfrom typing import TYPE_CHECKING, Generic, TypeVar\n\nfrom packaging import version\n\nfrom pandera.typing.common import DataFrameBase, DataFrameModel\n\n\ntry:\n import polars as pl\n\n POLARS_INSTALLED = True\nexcept ImportError:\n POLARS_INSTALLED = False\n\n\ndef polars_version():\n \"\"\"Return the modin version.\"\"\"\n return version.parse(pl.__version__)\n\n\n# pylint:disable=invalid-name\nif TYPE_CHECKING:\n T = TypeVar(\"T\") # pragma: no cover\nelse:\n T = DataFrameModel\n\n\nif POLARS_INSTALLED:\n # pylint: disable=too-few-public-methods\n class LazyFrame(DataFrameBase, pl.LazyFrame, Generic[T]):\n \"\"\"\n Pandera generic for pl.LazyFrame, only used for type annotation.\n\n *new in 0.19.0*\n \"\"\"\n", "path": "pandera/typing/polars.py"}], "after_files": [{"content": "\"\"\"Class-based api for polars models.\"\"\"\n\nfrom typing import (\n Dict,\n List,\n Tuple,\n Type,\n)\n\nimport pandas as pd\nimport polars as pl\nfrom pandera.api.checks import Check\nfrom pandera.api.dataframe.model import (\n DataFrameModel as _DataFrameModel,\n get_dtype_kwargs,\n)\nfrom pandera.api.dataframe.model_components import FieldInfo\nfrom pandera.api.polars.container import DataFrameSchema\nfrom pandera.api.polars.components import Column\nfrom pandera.api.polars.model_config import BaseConfig\nfrom pandera.engines import polars_engine as pe\nfrom pandera.errors import SchemaInitError\nfrom pandera.typing import AnnotationInfo\nfrom pandera.typing.polars import Series\n\n\nclass DataFrameModel(_DataFrameModel[pl.LazyFrame, DataFrameSchema]):\n \"\"\"Model of a polars :class:`~pandera.api.pandas.container.DataFrameSchema`.\n\n See the :ref:`User Guide <dataframe-models>` for more.\n \"\"\"\n\n Config: Type[BaseConfig] = BaseConfig\n\n @classmethod\n def build_schema_(cls, **kwargs):\n return DataFrameSchema(\n cls._build_columns(cls.__fields__, cls.__checks__),\n checks=cls.__root_checks__,\n **kwargs,\n )\n\n @classmethod\n def _build_columns( # pylint:disable=too-many-locals\n cls,\n fields: Dict[str, Tuple[AnnotationInfo, FieldInfo]],\n checks: Dict[str, List[Check]],\n ) -> Dict[str, Column]:\n\n columns: Dict[str, Column] = {}\n for field_name, (annotation, field) in fields.items():\n field_checks = checks.get(field_name, [])\n field_name = field.name\n check_name = getattr(field, \"check_name\", None)\n\n engine_dtype = None\n try:\n engine_dtype = pe.Engine.dtype(annotation.raw_annotation)\n dtype = engine_dtype.type\n except TypeError as exc:\n if annotation.metadata:\n if field.dtype_kwargs:\n raise TypeError(\n \"Cannot specify redundant 'dtype_kwargs' \"\n + f\"for {annotation.raw_annotation}.\"\n + \"\\n Usage Tip: Drop 'typing.Annotated'.\"\n ) from exc\n dtype_kwargs = get_dtype_kwargs(annotation)\n dtype = annotation.arg(**dtype_kwargs) # type: ignore\n elif annotation.default_dtype:\n dtype = annotation.default_dtype\n else:\n dtype = annotation.arg\n\n if (\n annotation.origin is None\n or isinstance(annotation.origin, pl.datatypes.DataTypeClass)\n or annotation.origin is Series\n or engine_dtype\n ):\n if check_name is False:\n raise SchemaInitError(\n f\"'check_name' is not supported for {field_name}.\"\n )\n\n column_kwargs = (\n field.column_properties(\n dtype,\n required=not annotation.optional,\n checks=field_checks,\n name=field_name,\n )\n if field\n else {}\n )\n columns[field_name] = Column(**column_kwargs)\n\n else:\n raise SchemaInitError(\n f\"Invalid annotation '{field_name}: \"\n f\"{annotation.raw_annotation}'.\"\n )\n\n return columns\n\n @classmethod\n def to_json_schema(cls):\n \"\"\"Serialize schema metadata into json-schema format.\n\n :param dataframe_schema: schema to write to json-schema format.\n\n .. note::\n\n This function is currently does not fully specify a pandera schema,\n and is primarily used internally to render OpenAPI docs via the\n FastAPI integration.\n \"\"\"\n schema = cls.to_schema()\n empty = pd.DataFrame(columns=schema.columns.keys()).astype(\n {k: v.type for k, v in schema.dtypes.items()}\n )\n table_schema = pd.io.json.build_table_schema(empty)\n\n def _field_json_schema(field):\n return {\n \"type\": \"array\",\n \"items\": {\"type\": field[\"type\"]},\n }\n\n return {\n \"title\": schema.name or \"pandera.DataFrameSchema\",\n \"type\": \"object\",\n \"properties\": {\n field[\"name\"]: _field_json_schema(field)\n for field in table_schema[\"fields\"]\n },\n }\n", "path": "pandera/api/polars/model.py"}, {"content": "\"\"\"Pandera type annotations for Polars.\"\"\"\n\nfrom typing import TYPE_CHECKING, Generic, TypeVar\n\nfrom packaging import version\n\nfrom pandera.typing.common import (\n DataFrameBase,\n DataFrameModel,\n SeriesBase,\n)\n\n\ntry:\n import polars as pl\n\n POLARS_INSTALLED = True\nexcept ImportError:\n POLARS_INSTALLED = False\n\n\ndef polars_version():\n \"\"\"Return the modin version.\"\"\"\n return version.parse(pl.__version__)\n\n\n# pylint:disable=invalid-name\nif TYPE_CHECKING:\n T = TypeVar(\"T\") # pragma: no cover\nelse:\n T = DataFrameModel\n\n\nif POLARS_INSTALLED:\n # pylint: disable=too-few-public-methods\n class LazyFrame(DataFrameBase, pl.LazyFrame, Generic[T]):\n \"\"\"\n Pandera generic for pl.LazyFrame, only used for type annotation.\n\n *new in 0.19.0*\n \"\"\"\n\n # pylint: disable=too-few-public-methods\n class Series(SeriesBase, pl.Series, Generic[T]):\n \"\"\"\n Pandera generic for pl.Series, only used for type annotation.\n\n *new in 0.19.0*\n \"\"\"\n", "path": "pandera/typing/polars.py"}]}
1,979
517
gh_patches_debug_32970
rasdani/github-patches
git_diff
Kinto__kinto-835
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Make the location of version.json configurable Currently @phrawzty had to put the `version.json` at the root location of the container. If the location would be a setting we could specify via env variable in the dockerfile for example Make the location of version.json configurable Currently @phrawzty had to put the `version.json` at the root location of the container. If the location would be a setting we could specify via env variable in the dockerfile for example --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kinto/core/__init__.py` Content: ``` 1 """Main entry point 2 """ 3 import pkg_resources 4 5 from cornice import Service as CorniceService 6 from pyramid.settings import aslist 7 8 from kinto.core import authentication 9 from kinto.core import errors 10 from kinto.core import events 11 from kinto.core.initialization import ( # NOQA 12 initialize, install_middlewares, 13 load_default_settings) 14 from kinto.core.utils import ( 15 follow_subrequest, current_service, current_resource_name) 16 from kinto.core.logs import logger 17 18 19 # Module version, as defined in PEP-0396. 20 __version__ = pkg_resources.get_distribution('kinto').version # FIXME? 21 22 23 DEFAULT_SETTINGS = { 24 'backoff': None, 25 'batch_max_requests': 25, 26 'cache_backend': '', 27 'cache_url': '', 28 'cache_pool_size': 25, 29 'cache_prefix': '', 30 'cors_origins': '*', 31 'cors_max_age_seconds': 3600, 32 'eos': None, 33 'eos_message': None, 34 'eos_url': None, 35 'error_info_link': 'https://github.com/Kinto/kinto/issues/', 36 'http_host': None, 37 'http_scheme': None, 38 'id_generator': 'kinto.core.storage.generators.UUID4', 39 'includes': '', 40 'initialization_sequence': ( 41 'kinto.core.initialization.setup_request_bound_data', 42 'kinto.core.initialization.setup_json_serializer', 43 'kinto.core.initialization.setup_logging', 44 'kinto.core.initialization.setup_storage', 45 'kinto.core.initialization.setup_permission', 46 'kinto.core.initialization.setup_cache', 47 'kinto.core.initialization.setup_requests_scheme', 48 'kinto.core.initialization.setup_version_redirection', 49 'kinto.core.initialization.setup_deprecation', 50 'kinto.core.initialization.setup_authentication', 51 'kinto.core.initialization.setup_backoff', 52 'kinto.core.initialization.setup_statsd', 53 'kinto.core.initialization.setup_listeners', 54 'kinto.core.events.setup_transaction_hook', 55 ), 56 'event_listeners': '', 57 'heartbeat_timeout_seconds': 10, 58 'logging_renderer': 'kinto.core.logs.ClassicLogRenderer', 59 'newrelic_config': None, 60 'newrelic_env': 'dev', 61 'paginate_by': None, 62 'permission_backend': '', 63 'permission_url': '', 64 'permission_pool_size': 25, 65 'profiler_dir': '/tmp', 66 'profiler_enabled': False, 67 'project_docs': '', 68 'project_name': '', 69 'project_version': '', 70 'readonly': False, 71 'retry_after_seconds': 30, 72 'statsd_backend': 'kinto.core.statsd', 73 'statsd_prefix': 'kinto.core', 74 'statsd_url': None, 75 'storage_backend': '', 76 'storage_url': '', 77 'storage_max_fetch_size': 10000, 78 'storage_pool_size': 25, 79 'tm.annotate_user': False, # Do annotate transactions with the user-id. 80 'transaction_per_request': True, 81 'userid_hmac_secret': '', 82 'version_prefix_redirect_enabled': True, 83 'trailing_slash_redirect_enabled': True, 84 'multiauth.groupfinder': 'kinto.core.authorization.groupfinder', 85 'multiauth.policies': 'basicauth', 86 'multiauth.policy.basicauth.use': ('kinto.core.authentication.' 87 'BasicAuthAuthenticationPolicy'), 88 'multiauth.authorization_policy': ('kinto.core.authorization.' 89 'AuthorizationPolicy') 90 } 91 92 93 class Service(CorniceService): 94 """Subclass of the default cornice service. 95 96 This is useful in order to attach specific behaviours without monkey 97 patching the default cornice service (which would impact other uses of it) 98 """ 99 default_cors_headers = ('Backoff', 'Retry-After', 'Alert', 100 'Content-Length') 101 102 def error_handler(self, error): 103 return errors.json_error_handler(error) 104 105 @classmethod 106 def init_from_settings(cls, settings): 107 cls.cors_origins = tuple(aslist(settings['cors_origins'])) 108 cors_max_age = settings['cors_max_age_seconds'] 109 cls.cors_max_age = int(cors_max_age) if cors_max_age else None 110 111 112 def includeme(config): 113 settings = config.get_settings() 114 115 # Heartbeat registry. 116 config.registry.heartbeats = {} 117 118 # Public settings registry. 119 config.registry.public_settings = {'batch_max_requests', 'readonly'} 120 121 # Directive to declare arbitrary API capabilities. 122 def add_api_capability(config, identifier, description="", url="", **kw): 123 existing = config.registry.api_capabilities.get(identifier) 124 if existing: 125 error_msg = "The '%s' API capability was already registered (%s)." 126 raise ValueError(error_msg % (identifier, existing)) 127 128 capability = dict(description=description, url=url, **kw) 129 config.registry.api_capabilities[identifier] = capability 130 131 config.add_directive('add_api_capability', add_api_capability) 132 config.registry.api_capabilities = {} 133 134 # Resource events helpers. 135 config.add_request_method(events.get_resource_events, 136 name='get_resource_events') 137 config.add_request_method(events.notify_resource_event, 138 name='notify_resource_event') 139 140 # Setup cornice. 141 config.include("cornice") 142 143 # Per-request transaction. 144 config.include("pyramid_tm") 145 146 # Add CORS settings to the base kinto.core Service class. 147 Service.init_from_settings(settings) 148 149 # Setup components. 150 for step in aslist(settings['initialization_sequence']): 151 step_func = config.maybe_dotted(step) 152 step_func(config) 153 154 # Custom helpers. 155 config.add_request_method(follow_subrequest) 156 config.add_request_method(authentication.prefixed_userid, property=True) 157 config.add_request_method(lambda r: {'id': r.prefixed_userid}, 158 name='get_user_info') 159 config.add_request_method(current_resource_name, reify=True) 160 config.add_request_method(current_service, reify=True) 161 config.commit() 162 163 # Include plugins after init, unlike pyramid includes. 164 includes = aslist(settings['includes']) 165 for app in includes: 166 config.include(app) 167 168 # # Show settings to output. 169 # for key, value in settings.items(): 170 # logger.info('Using %s = %s' % (key, value)) 171 172 # Scan views. 173 config.scan("kinto.core.views") 174 175 # Give sign of life. 176 msg = "Running %(project_name)s %(project_version)s." 177 logger.info(msg % settings) 178 ``` Path: `kinto/core/views/version.py` Content: ``` 1 import json 2 import os 3 4 from pyramid import httpexceptions 5 from pyramid.security import NO_PERMISSION_REQUIRED 6 from kinto.core import Service 7 8 HERE = os.path.dirname(os.path.abspath(__file__)) 9 ORIGIN = os.path.dirname(os.path.dirname(HERE)) 10 11 VERSION_PATH = 'version.json' 12 VERSION_JSON = None 13 14 version = Service(name="version", path='/__version__', description="Version") 15 16 VERSIONS_FILES = [ 17 VERSION_PATH, # Relative to the CURRENT_WORKING_DIR. 18 os.path.join(ORIGIN, VERSION_PATH), # Relative to the package. 19 os.path.join(HERE, VERSION_PATH)] # Relative to this file. 20 21 for version_file in VERSIONS_FILES: 22 file_path = os.path.abspath(version_file) 23 if os.path.exists(file_path): 24 with open(file_path) as f: 25 VERSION_JSON = json.load(f) 26 break # The first one wins 27 28 29 @version.get(permission=NO_PERMISSION_REQUIRED) 30 def version_view(request): 31 if VERSION_JSON is not None: 32 return VERSION_JSON 33 34 raise httpexceptions.HTTPNotFound() 35 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kinto/core/__init__.py b/kinto/core/__init__.py --- a/kinto/core/__init__.py +++ b/kinto/core/__init__.py @@ -79,6 +79,7 @@ 'tm.annotate_user': False, # Do annotate transactions with the user-id. 'transaction_per_request': True, 'userid_hmac_secret': '', + 'version_json_path': 'version.json', 'version_prefix_redirect_enabled': True, 'trailing_slash_redirect_enabled': True, 'multiauth.groupfinder': 'kinto.core.authorization.groupfinder', diff --git a/kinto/core/views/version.py b/kinto/core/views/version.py --- a/kinto/core/views/version.py +++ b/kinto/core/views/version.py @@ -8,27 +8,27 @@ HERE = os.path.dirname(os.path.abspath(__file__)) ORIGIN = os.path.dirname(os.path.dirname(HERE)) -VERSION_PATH = 'version.json' -VERSION_JSON = None - version = Service(name="version", path='/__version__', description="Version") -VERSIONS_FILES = [ - VERSION_PATH, # Relative to the CURRENT_WORKING_DIR. - os.path.join(ORIGIN, VERSION_PATH), # Relative to the package. - os.path.join(HERE, VERSION_PATH)] # Relative to this file. - -for version_file in VERSIONS_FILES: - file_path = os.path.abspath(version_file) - if os.path.exists(file_path): - with open(file_path) as f: - VERSION_JSON = json.load(f) - break # The first one wins - @version.get(permission=NO_PERMISSION_REQUIRED) def version_view(request): - if VERSION_JSON is not None: - return VERSION_JSON + try: + return version_view.__json__ + except AttributeError: + pass + + location = request.registry.settings['version_json_path'] + files = [ + location, # Default is current working dir. + os.path.join(ORIGIN, 'version.json'), # Relative to the package root. + os.path.join(HERE, 'version.json') # Relative to this file. + ] + for version_file in files: + file_path = os.path.abspath(version_file) + if os.path.exists(file_path): + with open(file_path) as f: + version_view.__json__ = json.load(f) + return version_view.__json__ # First one wins. raise httpexceptions.HTTPNotFound()
{"golden_diff": "diff --git a/kinto/core/__init__.py b/kinto/core/__init__.py\n--- a/kinto/core/__init__.py\n+++ b/kinto/core/__init__.py\n@@ -79,6 +79,7 @@\n 'tm.annotate_user': False, # Do annotate transactions with the user-id.\n 'transaction_per_request': True,\n 'userid_hmac_secret': '',\n+ 'version_json_path': 'version.json',\n 'version_prefix_redirect_enabled': True,\n 'trailing_slash_redirect_enabled': True,\n 'multiauth.groupfinder': 'kinto.core.authorization.groupfinder',\ndiff --git a/kinto/core/views/version.py b/kinto/core/views/version.py\n--- a/kinto/core/views/version.py\n+++ b/kinto/core/views/version.py\n@@ -8,27 +8,27 @@\n HERE = os.path.dirname(os.path.abspath(__file__))\n ORIGIN = os.path.dirname(os.path.dirname(HERE))\n \n-VERSION_PATH = 'version.json'\n-VERSION_JSON = None\n-\n version = Service(name=\"version\", path='/__version__', description=\"Version\")\n \n-VERSIONS_FILES = [\n- VERSION_PATH, # Relative to the CURRENT_WORKING_DIR.\n- os.path.join(ORIGIN, VERSION_PATH), # Relative to the package.\n- os.path.join(HERE, VERSION_PATH)] # Relative to this file.\n-\n-for version_file in VERSIONS_FILES:\n- file_path = os.path.abspath(version_file)\n- if os.path.exists(file_path):\n- with open(file_path) as f:\n- VERSION_JSON = json.load(f)\n- break # The first one wins\n-\n \n @version.get(permission=NO_PERMISSION_REQUIRED)\n def version_view(request):\n- if VERSION_JSON is not None:\n- return VERSION_JSON\n+ try:\n+ return version_view.__json__\n+ except AttributeError:\n+ pass\n+\n+ location = request.registry.settings['version_json_path']\n+ files = [\n+ location, # Default is current working dir.\n+ os.path.join(ORIGIN, 'version.json'), # Relative to the package root.\n+ os.path.join(HERE, 'version.json') # Relative to this file.\n+ ]\n+ for version_file in files:\n+ file_path = os.path.abspath(version_file)\n+ if os.path.exists(file_path):\n+ with open(file_path) as f:\n+ version_view.__json__ = json.load(f)\n+ return version_view.__json__ # First one wins.\n \n raise httpexceptions.HTTPNotFound()\n", "issue": "Make the location of version.json configurable\nCurrently @phrawzty had to put the `version.json` at the root location of the container.\n\nIf the location would be a setting we could specify via env variable in the dockerfile for example\n\nMake the location of version.json configurable\nCurrently @phrawzty had to put the `version.json` at the root location of the container.\n\nIf the location would be a setting we could specify via env variable in the dockerfile for example\n\n", "before_files": [{"content": "\"\"\"Main entry point\n\"\"\"\nimport pkg_resources\n\nfrom cornice import Service as CorniceService\nfrom pyramid.settings import aslist\n\nfrom kinto.core import authentication\nfrom kinto.core import errors\nfrom kinto.core import events\nfrom kinto.core.initialization import ( # NOQA\n initialize, install_middlewares,\n load_default_settings)\nfrom kinto.core.utils import (\n follow_subrequest, current_service, current_resource_name)\nfrom kinto.core.logs import logger\n\n\n# Module version, as defined in PEP-0396.\n__version__ = pkg_resources.get_distribution('kinto').version # FIXME?\n\n\nDEFAULT_SETTINGS = {\n 'backoff': None,\n 'batch_max_requests': 25,\n 'cache_backend': '',\n 'cache_url': '',\n 'cache_pool_size': 25,\n 'cache_prefix': '',\n 'cors_origins': '*',\n 'cors_max_age_seconds': 3600,\n 'eos': None,\n 'eos_message': None,\n 'eos_url': None,\n 'error_info_link': 'https://github.com/Kinto/kinto/issues/',\n 'http_host': None,\n 'http_scheme': None,\n 'id_generator': 'kinto.core.storage.generators.UUID4',\n 'includes': '',\n 'initialization_sequence': (\n 'kinto.core.initialization.setup_request_bound_data',\n 'kinto.core.initialization.setup_json_serializer',\n 'kinto.core.initialization.setup_logging',\n 'kinto.core.initialization.setup_storage',\n 'kinto.core.initialization.setup_permission',\n 'kinto.core.initialization.setup_cache',\n 'kinto.core.initialization.setup_requests_scheme',\n 'kinto.core.initialization.setup_version_redirection',\n 'kinto.core.initialization.setup_deprecation',\n 'kinto.core.initialization.setup_authentication',\n 'kinto.core.initialization.setup_backoff',\n 'kinto.core.initialization.setup_statsd',\n 'kinto.core.initialization.setup_listeners',\n 'kinto.core.events.setup_transaction_hook',\n ),\n 'event_listeners': '',\n 'heartbeat_timeout_seconds': 10,\n 'logging_renderer': 'kinto.core.logs.ClassicLogRenderer',\n 'newrelic_config': None,\n 'newrelic_env': 'dev',\n 'paginate_by': None,\n 'permission_backend': '',\n 'permission_url': '',\n 'permission_pool_size': 25,\n 'profiler_dir': '/tmp',\n 'profiler_enabled': False,\n 'project_docs': '',\n 'project_name': '',\n 'project_version': '',\n 'readonly': False,\n 'retry_after_seconds': 30,\n 'statsd_backend': 'kinto.core.statsd',\n 'statsd_prefix': 'kinto.core',\n 'statsd_url': None,\n 'storage_backend': '',\n 'storage_url': '',\n 'storage_max_fetch_size': 10000,\n 'storage_pool_size': 25,\n 'tm.annotate_user': False, # Do annotate transactions with the user-id.\n 'transaction_per_request': True,\n 'userid_hmac_secret': '',\n 'version_prefix_redirect_enabled': True,\n 'trailing_slash_redirect_enabled': True,\n 'multiauth.groupfinder': 'kinto.core.authorization.groupfinder',\n 'multiauth.policies': 'basicauth',\n 'multiauth.policy.basicauth.use': ('kinto.core.authentication.'\n 'BasicAuthAuthenticationPolicy'),\n 'multiauth.authorization_policy': ('kinto.core.authorization.'\n 'AuthorizationPolicy')\n}\n\n\nclass Service(CorniceService):\n \"\"\"Subclass of the default cornice service.\n\n This is useful in order to attach specific behaviours without monkey\n patching the default cornice service (which would impact other uses of it)\n \"\"\"\n default_cors_headers = ('Backoff', 'Retry-After', 'Alert',\n 'Content-Length')\n\n def error_handler(self, error):\n return errors.json_error_handler(error)\n\n @classmethod\n def init_from_settings(cls, settings):\n cls.cors_origins = tuple(aslist(settings['cors_origins']))\n cors_max_age = settings['cors_max_age_seconds']\n cls.cors_max_age = int(cors_max_age) if cors_max_age else None\n\n\ndef includeme(config):\n settings = config.get_settings()\n\n # Heartbeat registry.\n config.registry.heartbeats = {}\n\n # Public settings registry.\n config.registry.public_settings = {'batch_max_requests', 'readonly'}\n\n # Directive to declare arbitrary API capabilities.\n def add_api_capability(config, identifier, description=\"\", url=\"\", **kw):\n existing = config.registry.api_capabilities.get(identifier)\n if existing:\n error_msg = \"The '%s' API capability was already registered (%s).\"\n raise ValueError(error_msg % (identifier, existing))\n\n capability = dict(description=description, url=url, **kw)\n config.registry.api_capabilities[identifier] = capability\n\n config.add_directive('add_api_capability', add_api_capability)\n config.registry.api_capabilities = {}\n\n # Resource events helpers.\n config.add_request_method(events.get_resource_events,\n name='get_resource_events')\n config.add_request_method(events.notify_resource_event,\n name='notify_resource_event')\n\n # Setup cornice.\n config.include(\"cornice\")\n\n # Per-request transaction.\n config.include(\"pyramid_tm\")\n\n # Add CORS settings to the base kinto.core Service class.\n Service.init_from_settings(settings)\n\n # Setup components.\n for step in aslist(settings['initialization_sequence']):\n step_func = config.maybe_dotted(step)\n step_func(config)\n\n # Custom helpers.\n config.add_request_method(follow_subrequest)\n config.add_request_method(authentication.prefixed_userid, property=True)\n config.add_request_method(lambda r: {'id': r.prefixed_userid},\n name='get_user_info')\n config.add_request_method(current_resource_name, reify=True)\n config.add_request_method(current_service, reify=True)\n config.commit()\n\n # Include plugins after init, unlike pyramid includes.\n includes = aslist(settings['includes'])\n for app in includes:\n config.include(app)\n\n # # Show settings to output.\n # for key, value in settings.items():\n # logger.info('Using %s = %s' % (key, value))\n\n # Scan views.\n config.scan(\"kinto.core.views\")\n\n # Give sign of life.\n msg = \"Running %(project_name)s %(project_version)s.\"\n logger.info(msg % settings)\n", "path": "kinto/core/__init__.py"}, {"content": "import json\nimport os\n\nfrom pyramid import httpexceptions\nfrom pyramid.security import NO_PERMISSION_REQUIRED\nfrom kinto.core import Service\n\nHERE = os.path.dirname(os.path.abspath(__file__))\nORIGIN = os.path.dirname(os.path.dirname(HERE))\n\nVERSION_PATH = 'version.json'\nVERSION_JSON = None\n\nversion = Service(name=\"version\", path='/__version__', description=\"Version\")\n\nVERSIONS_FILES = [\n VERSION_PATH, # Relative to the CURRENT_WORKING_DIR.\n os.path.join(ORIGIN, VERSION_PATH), # Relative to the package.\n os.path.join(HERE, VERSION_PATH)] # Relative to this file.\n\nfor version_file in VERSIONS_FILES:\n file_path = os.path.abspath(version_file)\n if os.path.exists(file_path):\n with open(file_path) as f:\n VERSION_JSON = json.load(f)\n break # The first one wins\n\n\[email protected](permission=NO_PERMISSION_REQUIRED)\ndef version_view(request):\n if VERSION_JSON is not None:\n return VERSION_JSON\n\n raise httpexceptions.HTTPNotFound()\n", "path": "kinto/core/views/version.py"}], "after_files": [{"content": "\"\"\"Main entry point\n\"\"\"\nimport pkg_resources\n\nfrom cornice import Service as CorniceService\nfrom pyramid.settings import aslist\n\nfrom kinto.core import authentication\nfrom kinto.core import errors\nfrom kinto.core import events\nfrom kinto.core.initialization import ( # NOQA\n initialize, install_middlewares,\n load_default_settings)\nfrom kinto.core.utils import (\n follow_subrequest, current_service, current_resource_name)\nfrom kinto.core.logs import logger\n\n\n# Module version, as defined in PEP-0396.\n__version__ = pkg_resources.get_distribution('kinto').version # FIXME?\n\n\nDEFAULT_SETTINGS = {\n 'backoff': None,\n 'batch_max_requests': 25,\n 'cache_backend': '',\n 'cache_url': '',\n 'cache_pool_size': 25,\n 'cache_prefix': '',\n 'cors_origins': '*',\n 'cors_max_age_seconds': 3600,\n 'eos': None,\n 'eos_message': None,\n 'eos_url': None,\n 'error_info_link': 'https://github.com/Kinto/kinto/issues/',\n 'http_host': None,\n 'http_scheme': None,\n 'id_generator': 'kinto.core.storage.generators.UUID4',\n 'includes': '',\n 'initialization_sequence': (\n 'kinto.core.initialization.setup_request_bound_data',\n 'kinto.core.initialization.setup_json_serializer',\n 'kinto.core.initialization.setup_logging',\n 'kinto.core.initialization.setup_storage',\n 'kinto.core.initialization.setup_permission',\n 'kinto.core.initialization.setup_cache',\n 'kinto.core.initialization.setup_requests_scheme',\n 'kinto.core.initialization.setup_version_redirection',\n 'kinto.core.initialization.setup_deprecation',\n 'kinto.core.initialization.setup_authentication',\n 'kinto.core.initialization.setup_backoff',\n 'kinto.core.initialization.setup_statsd',\n 'kinto.core.initialization.setup_listeners',\n 'kinto.core.events.setup_transaction_hook',\n ),\n 'event_listeners': '',\n 'heartbeat_timeout_seconds': 10,\n 'logging_renderer': 'kinto.core.logs.ClassicLogRenderer',\n 'newrelic_config': None,\n 'newrelic_env': 'dev',\n 'paginate_by': None,\n 'permission_backend': '',\n 'permission_url': '',\n 'permission_pool_size': 25,\n 'profiler_dir': '/tmp',\n 'profiler_enabled': False,\n 'project_docs': '',\n 'project_name': '',\n 'project_version': '',\n 'readonly': False,\n 'retry_after_seconds': 30,\n 'statsd_backend': 'kinto.core.statsd',\n 'statsd_prefix': 'kinto.core',\n 'statsd_url': None,\n 'storage_backend': '',\n 'storage_url': '',\n 'storage_max_fetch_size': 10000,\n 'storage_pool_size': 25,\n 'tm.annotate_user': False, # Do annotate transactions with the user-id.\n 'transaction_per_request': True,\n 'userid_hmac_secret': '',\n 'version_json_path': 'version.json',\n 'version_prefix_redirect_enabled': True,\n 'trailing_slash_redirect_enabled': True,\n 'multiauth.groupfinder': 'kinto.core.authorization.groupfinder',\n 'multiauth.policies': 'basicauth',\n 'multiauth.policy.basicauth.use': ('kinto.core.authentication.'\n 'BasicAuthAuthenticationPolicy'),\n 'multiauth.authorization_policy': ('kinto.core.authorization.'\n 'AuthorizationPolicy')\n}\n\n\nclass Service(CorniceService):\n \"\"\"Subclass of the default cornice service.\n\n This is useful in order to attach specific behaviours without monkey\n patching the default cornice service (which would impact other uses of it)\n \"\"\"\n default_cors_headers = ('Backoff', 'Retry-After', 'Alert',\n 'Content-Length')\n\n def error_handler(self, error):\n return errors.json_error_handler(error)\n\n @classmethod\n def init_from_settings(cls, settings):\n cls.cors_origins = tuple(aslist(settings['cors_origins']))\n cors_max_age = settings['cors_max_age_seconds']\n cls.cors_max_age = int(cors_max_age) if cors_max_age else None\n\n\ndef includeme(config):\n settings = config.get_settings()\n\n # Heartbeat registry.\n config.registry.heartbeats = {}\n\n # Public settings registry.\n config.registry.public_settings = {'batch_max_requests', 'readonly'}\n\n # Directive to declare arbitrary API capabilities.\n def add_api_capability(config, identifier, description=\"\", url=\"\", **kw):\n existing = config.registry.api_capabilities.get(identifier)\n if existing:\n error_msg = \"The '%s' API capability was already registered (%s).\"\n raise ValueError(error_msg % (identifier, existing))\n\n capability = dict(description=description, url=url, **kw)\n config.registry.api_capabilities[identifier] = capability\n\n config.add_directive('add_api_capability', add_api_capability)\n config.registry.api_capabilities = {}\n\n # Resource events helpers.\n config.add_request_method(events.get_resource_events,\n name='get_resource_events')\n config.add_request_method(events.notify_resource_event,\n name='notify_resource_event')\n\n # Setup cornice.\n config.include(\"cornice\")\n\n # Per-request transaction.\n config.include(\"pyramid_tm\")\n\n # Add CORS settings to the base kinto.core Service class.\n Service.init_from_settings(settings)\n\n # Setup components.\n for step in aslist(settings['initialization_sequence']):\n step_func = config.maybe_dotted(step)\n step_func(config)\n\n # Custom helpers.\n config.add_request_method(follow_subrequest)\n config.add_request_method(authentication.prefixed_userid, property=True)\n config.add_request_method(lambda r: {'id': r.prefixed_userid},\n name='get_user_info')\n config.add_request_method(current_resource_name, reify=True)\n config.add_request_method(current_service, reify=True)\n config.commit()\n\n # Include plugins after init, unlike pyramid includes.\n includes = aslist(settings['includes'])\n for app in includes:\n config.include(app)\n\n # # Show settings to output.\n # for key, value in settings.items():\n # logger.info('Using %s = %s' % (key, value))\n\n # Scan views.\n config.scan(\"kinto.core.views\")\n\n # Give sign of life.\n msg = \"Running %(project_name)s %(project_version)s.\"\n logger.info(msg % settings)\n", "path": "kinto/core/__init__.py"}, {"content": "import json\nimport os\n\nfrom pyramid import httpexceptions\nfrom pyramid.security import NO_PERMISSION_REQUIRED\nfrom kinto.core import Service\n\nHERE = os.path.dirname(os.path.abspath(__file__))\nORIGIN = os.path.dirname(os.path.dirname(HERE))\n\nversion = Service(name=\"version\", path='/__version__', description=\"Version\")\n\n\[email protected](permission=NO_PERMISSION_REQUIRED)\ndef version_view(request):\n try:\n return version_view.__json__\n except AttributeError:\n pass\n\n location = request.registry.settings['version_json_path']\n files = [\n location, # Default is current working dir.\n os.path.join(ORIGIN, 'version.json'), # Relative to the package root.\n os.path.join(HERE, 'version.json') # Relative to this file.\n ]\n for version_file in files:\n file_path = os.path.abspath(version_file)\n if os.path.exists(file_path):\n with open(file_path) as f:\n version_view.__json__ = json.load(f)\n return version_view.__json__ # First one wins.\n\n raise httpexceptions.HTTPNotFound()\n", "path": "kinto/core/views/version.py"}]}
2,501
550
gh_patches_debug_12166
rasdani/github-patches
git_diff
openstates__openstates-scrapers-1987
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- VT failing since at least 2017-12-02 VT has been failing since 2017-12-02 Based on automated runs it appears that VT has not run successfully in 2 days (2017-12-02). ``` 00:02:03 INFO scrapelib: GET - http://legislature.vermont.gov/bill/status/2018/S.C.R.13 00:02:06 INFO scrapelib: GET - http://legislature.vermont.gov/bill/loadBillDetailedStatus/2018/1388 00:02:06 INFO scrapelib: GET - http://legislature.vermont.gov/bill/loadBillRollCalls/2018/1388 00:02:07 INFO pupa: save bill S.C.R.13 in 2017-2018 as bill_7bf9480a-d7ef-11e7-8049-0242ac110003.json 00:02:07 INFO scrapelib: GET - http://legislature.vermont.gov/bill/status/2018/S.C.R.14 00:02:10 INFO scrapelib: GET - http://legislature.vermont.gov/bill/loadBillDetailedStatus/2018/1435 00:02:10 INFO scrapelib: GET - http://legislature.vermont.gov/bill/loadBillRollCalls/2018/1435 00:02:11 INFO pupa: save bill S.C.R.14 in 2017-2018 as bill_7e45aab8-d7ef-11e7-8049-0242ac110003.json 00:02:11 INFO scrapelib: GET - http://legislature.vermont.gov/bill/status/2018/S.C.R.15 00:02:13 INFO scrapelib: GET - http://legislature.vermont.gov/bill/loadBillDetailedStatus/2018/1427 00:02:14 INFO scrapelib: GET - http://legislature.vermont.gov/bill/loadBillRollCalls/2018/1427 00:02:15 INFO pupa: save bill S.C.R.15 in 2017-2018 as bill_8098e870-d7ef-11e7-8049-0242ac110003.json 00:02:15 INFO scrapelib: GET - http://legislature.vermont.gov/bill/status/2018/S.C.R.16 00:02:17 INFO scrapelib: GET - http://legislature.vermont.gov/bill/loadBillDetailedStatus/2018/1458 00:02:17 INFO scrapelib: GET - http://legislature.vermont.gov/bill/loadBillRollCalls/2018/1458 00:02:18 INFO pupa: save bill S.C.R.16 in 2017-2018 as bill_82e76e62-d7ef-11e7-8049-0242ac110003.json 00:02:18 INFO scrapelib: GET - http://legislature.vermont.gov/bill/status/2018/S.C.R.17 00:02:21 INFO scrapelib: GET - http://legislature.vermont.gov/bill/loadBillDetailedStatus/2018/1459 00:02:21 INFO scrapelib: GET - http://legislature.vermont.gov/bill/loadBillRollCalls/2018/1459 00:02:22 INFO pupa: save bill S.C.R.17 in 2017-2018 as bill_8532428c-d7ef-11e7-8049-0242ac110003.json 00:02:22 INFO scrapelib: GET - http://legislature.vermont.gov/bill/status/2018/S.C.R.18 00:02:24 INFO scrapelib: GET - http://legislature.vermont.gov/bill/loadBillDetailedStatus/2018/1473 00:02:24 INFO scrapelib: GET - http://legislature.vermont.gov/bill/loadBillRollCalls/2018/1473 00:02:25 INFO pupa: save bill S.C.R.18 in 2017-2018 as bill_872c1a4a-d7ef-11e7-8049-0242ac110003.json no pupa_settings on path, using defaults vt (scrape, import) people: {} committees: {} bills: {} import jurisdictions... import organizations... import people... File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py", line 260, in handle File "/opt/openstates/venv-pupa//bin/pupa", line 11, in <module> load_entry_point('pupa', 'console_scripts', 'pupa')() File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/__main__.py", line 67, in main subcommands[args.subcommand].handle(args, other) Traceback (most recent call last): return self.do_handle(args, other, juris) File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py", line 307, in do_handle report['import'] = self.do_import(juris, args) File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py", line 207, in do_import report.update(person_importer.import_directory(datadir)) File "/opt/openstates/venv-pupa/src/pupa/pupa/importers/base.py", line 190, in import_directory return self.import_data(json_stream()) File "/opt/openstates/venv-pupa/src/pupa/pupa/importers/base.py", line 226, in import_data for json_id, data in self._prepare_imports(data_items): File "/opt/openstates/venv-pupa/src/pupa/pupa/importers/people.py", line 33, in _prepare_imports raise SameNameError(name) pupa.exceptions.SameNameError: multiple people with same name "Christopher Mattos" in Jurisdiction - must provide birth_date to disambiguate ``` Visit http://bobsled.openstates.org for more info. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `openstates/vt/people.py` Content: ``` 1 import json 2 3 from pupa.scrape import Person, Scraper 4 from openstates.utils import LXMLMixin 5 6 7 class VTPersonScraper(Scraper, LXMLMixin): 8 CHAMBERS = {'Senator': 'upper', 'Representative': 'lower'} 9 10 def scrape(self, session=None): 11 if session is None: 12 session = self.latest_session() 13 year_slug = session[5:] 14 15 # Load all members via the private API 16 legislator_dump_url = ( 17 'http://legislature.vermont.gov/people/loadAll/{}'. 18 format(year_slug)) 19 json_data = self.get(legislator_dump_url).text 20 legislators = json.loads(json_data)['data'] 21 22 # Parse the information from each legislator 23 for info in legislators: 24 # Strip whitespace from strings 25 info = {k: v.strip() for k, v in info.items()} 26 27 # Gather photo URL from the member's page 28 member_url = ('http://legislature.vermont.gov/people/single/{}/{}'. 29 format(year_slug, info['PersonID'])) 30 page = self.lxmlize(member_url) 31 (photo_url, ) = page.xpath('//img[@class="profile-photo"]/@src') 32 33 # Also grab their state email address 34 state_email = page.xpath( 35 '//dl[@class="summary-table profile-summary"]/' 36 'dt[text()="Email"]/following-sibling::dd[1]/a/text()') 37 if state_email: 38 (state_email, ) = state_email 39 else: 40 state_email = None 41 42 district = info['District'].replace(" District", "") 43 if district == 'Grand Isle': 44 district = 'Chittenden-Grand Isle' 45 46 leg = Person( 47 primary_org=self.CHAMBERS[info['Title']], 48 district=district, 49 party=info['Party'].replace("Democrat", "Democratic"), 50 name="{0} {1}".format(info['FirstName'], info['LastName']), 51 image=photo_url 52 ) 53 54 leg.add_contact_detail( 55 note="Capitol Office", 56 type='address', 57 value='Vermont State House\n115 State Street\nMontpelier, VT 05633' 58 ) 59 if state_email: 60 leg.add_contact_detail(note="Capitol Office", type='email', value=state_email) 61 62 leg.add_contact_detail( 63 note="District Office", 64 type='address', 65 value="{0}{1}\n{2}, {3} {4}".format( 66 info['MailingAddress1'], 67 ("\n" + info['MailingAddress2'] 68 if info['MailingAddress2'].strip() 69 else ""), 70 info['MailingCity'], 71 info['MailingState'], 72 info['MailingZIP'] 73 ) 74 ) 75 if info['HomePhone']: 76 leg.add_contact_detail(note="District Office", type='voice', 77 value=info['HomePhone']) 78 district_email = info['Email'] or info['HomeEmail'] or info['WorkEmail'] 79 if district_email: 80 leg.add_contact_detail(note="District Office", type='email', value=district_email) 81 82 leg.add_link(member_url) 83 84 leg.add_source(legislator_dump_url) 85 leg.add_source(member_url) 86 87 yield leg 88 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/openstates/vt/people.py b/openstates/vt/people.py --- a/openstates/vt/people.py +++ b/openstates/vt/people.py @@ -24,6 +24,11 @@ # Strip whitespace from strings info = {k: v.strip() for k, v in info.items()} + # Skip duplicate record for Christopher Mattos (appointed Rep September 2017) + if info['PersonID'] == "29034": + self.info("skipping first Christopher Mattos record") + continue + # Gather photo URL from the member's page member_url = ('http://legislature.vermont.gov/people/single/{}/{}'. format(year_slug, info['PersonID']))
{"golden_diff": "diff --git a/openstates/vt/people.py b/openstates/vt/people.py\n--- a/openstates/vt/people.py\n+++ b/openstates/vt/people.py\n@@ -24,6 +24,11 @@\n # Strip whitespace from strings\n info = {k: v.strip() for k, v in info.items()}\n \n+ # Skip duplicate record for Christopher Mattos (appointed Rep September 2017)\n+ if info['PersonID'] == \"29034\":\n+ self.info(\"skipping first Christopher Mattos record\")\n+ continue\n+\n # Gather photo URL from the member's page\n member_url = ('http://legislature.vermont.gov/people/single/{}/{}'.\n format(year_slug, info['PersonID']))\n", "issue": "VT failing since at least 2017-12-02\nVT has been failing since 2017-12-02\n\nBased on automated runs it appears that VT has not run successfully in 2 days (2017-12-02).\n\n\n```\n 00:02:03 INFO scrapelib: GET - http://legislature.vermont.gov/bill/status/2018/S.C.R.13\n00:02:06 INFO scrapelib: GET - http://legislature.vermont.gov/bill/loadBillDetailedStatus/2018/1388\n00:02:06 INFO scrapelib: GET - http://legislature.vermont.gov/bill/loadBillRollCalls/2018/1388\n00:02:07 INFO pupa: save bill S.C.R.13 in 2017-2018 as bill_7bf9480a-d7ef-11e7-8049-0242ac110003.json\n00:02:07 INFO scrapelib: GET - http://legislature.vermont.gov/bill/status/2018/S.C.R.14\n00:02:10 INFO scrapelib: GET - http://legislature.vermont.gov/bill/loadBillDetailedStatus/2018/1435\n00:02:10 INFO scrapelib: GET - http://legislature.vermont.gov/bill/loadBillRollCalls/2018/1435\n00:02:11 INFO pupa: save bill S.C.R.14 in 2017-2018 as bill_7e45aab8-d7ef-11e7-8049-0242ac110003.json\n00:02:11 INFO scrapelib: GET - http://legislature.vermont.gov/bill/status/2018/S.C.R.15\n00:02:13 INFO scrapelib: GET - http://legislature.vermont.gov/bill/loadBillDetailedStatus/2018/1427\n00:02:14 INFO scrapelib: GET - http://legislature.vermont.gov/bill/loadBillRollCalls/2018/1427\n00:02:15 INFO pupa: save bill S.C.R.15 in 2017-2018 as bill_8098e870-d7ef-11e7-8049-0242ac110003.json\n00:02:15 INFO scrapelib: GET - http://legislature.vermont.gov/bill/status/2018/S.C.R.16\n00:02:17 INFO scrapelib: GET - http://legislature.vermont.gov/bill/loadBillDetailedStatus/2018/1458\n00:02:17 INFO scrapelib: GET - http://legislature.vermont.gov/bill/loadBillRollCalls/2018/1458\n00:02:18 INFO pupa: save bill S.C.R.16 in 2017-2018 as bill_82e76e62-d7ef-11e7-8049-0242ac110003.json\n00:02:18 INFO scrapelib: GET - http://legislature.vermont.gov/bill/status/2018/S.C.R.17\n00:02:21 INFO scrapelib: GET - http://legislature.vermont.gov/bill/loadBillDetailedStatus/2018/1459\n00:02:21 INFO scrapelib: GET - http://legislature.vermont.gov/bill/loadBillRollCalls/2018/1459\n00:02:22 INFO pupa: save bill S.C.R.17 in 2017-2018 as bill_8532428c-d7ef-11e7-8049-0242ac110003.json\n00:02:22 INFO scrapelib: GET - http://legislature.vermont.gov/bill/status/2018/S.C.R.18\n00:02:24 INFO scrapelib: GET - http://legislature.vermont.gov/bill/loadBillDetailedStatus/2018/1473\n00:02:24 INFO scrapelib: GET - http://legislature.vermont.gov/bill/loadBillRollCalls/2018/1473\n00:02:25 INFO pupa: save bill S.C.R.18 in 2017-2018 as bill_872c1a4a-d7ef-11e7-8049-0242ac110003.json\nno pupa_settings on path, using defaults\nvt (scrape, import)\n people: {}\n committees: {}\n bills: {}\nimport jurisdictions...\nimport organizations...\nimport people...\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py\", line 260, in handle\n File \"/opt/openstates/venv-pupa//bin/pupa\", line 11, in <module>\n load_entry_point('pupa', 'console_scripts', 'pupa')()\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/__main__.py\", line 67, in main\n subcommands[args.subcommand].handle(args, other)\nTraceback (most recent call last):\n return self.do_handle(args, other, juris)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py\", line 307, in do_handle\n report['import'] = self.do_import(juris, args)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py\", line 207, in do_import\n report.update(person_importer.import_directory(datadir))\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/importers/base.py\", line 190, in import_directory\n return self.import_data(json_stream())\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/importers/base.py\", line 226, in import_data\n for json_id, data in self._prepare_imports(data_items):\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/importers/people.py\", line 33, in _prepare_imports\n raise SameNameError(name)\npupa.exceptions.SameNameError: multiple people with same name \"Christopher Mattos\" in Jurisdiction - must provide birth_date to disambiguate\n```\n\nVisit http://bobsled.openstates.org for more info.\n\n", "before_files": [{"content": "import json\n\nfrom pupa.scrape import Person, Scraper\nfrom openstates.utils import LXMLMixin\n\n\nclass VTPersonScraper(Scraper, LXMLMixin):\n CHAMBERS = {'Senator': 'upper', 'Representative': 'lower'}\n\n def scrape(self, session=None):\n if session is None:\n session = self.latest_session()\n year_slug = session[5:]\n\n # Load all members via the private API\n legislator_dump_url = (\n 'http://legislature.vermont.gov/people/loadAll/{}'.\n format(year_slug))\n json_data = self.get(legislator_dump_url).text\n legislators = json.loads(json_data)['data']\n\n # Parse the information from each legislator\n for info in legislators:\n # Strip whitespace from strings\n info = {k: v.strip() for k, v in info.items()}\n\n # Gather photo URL from the member's page\n member_url = ('http://legislature.vermont.gov/people/single/{}/{}'.\n format(year_slug, info['PersonID']))\n page = self.lxmlize(member_url)\n (photo_url, ) = page.xpath('//img[@class=\"profile-photo\"]/@src')\n\n # Also grab their state email address\n state_email = page.xpath(\n '//dl[@class=\"summary-table profile-summary\"]/'\n 'dt[text()=\"Email\"]/following-sibling::dd[1]/a/text()')\n if state_email:\n (state_email, ) = state_email\n else:\n state_email = None\n\n district = info['District'].replace(\" District\", \"\")\n if district == 'Grand Isle':\n district = 'Chittenden-Grand Isle'\n\n leg = Person(\n primary_org=self.CHAMBERS[info['Title']],\n district=district,\n party=info['Party'].replace(\"Democrat\", \"Democratic\"),\n name=\"{0} {1}\".format(info['FirstName'], info['LastName']),\n image=photo_url\n )\n\n leg.add_contact_detail(\n note=\"Capitol Office\",\n type='address',\n value='Vermont State House\\n115 State Street\\nMontpelier, VT 05633'\n )\n if state_email:\n leg.add_contact_detail(note=\"Capitol Office\", type='email', value=state_email)\n\n leg.add_contact_detail(\n note=\"District Office\",\n type='address',\n value=\"{0}{1}\\n{2}, {3} {4}\".format(\n info['MailingAddress1'],\n (\"\\n\" + info['MailingAddress2']\n if info['MailingAddress2'].strip()\n else \"\"),\n info['MailingCity'],\n info['MailingState'],\n info['MailingZIP']\n )\n )\n if info['HomePhone']:\n leg.add_contact_detail(note=\"District Office\", type='voice',\n value=info['HomePhone'])\n district_email = info['Email'] or info['HomeEmail'] or info['WorkEmail']\n if district_email:\n leg.add_contact_detail(note=\"District Office\", type='email', value=district_email)\n\n leg.add_link(member_url)\n\n leg.add_source(legislator_dump_url)\n leg.add_source(member_url)\n\n yield leg\n", "path": "openstates/vt/people.py"}], "after_files": [{"content": "import json\n\nfrom pupa.scrape import Person, Scraper\nfrom openstates.utils import LXMLMixin\n\n\nclass VTPersonScraper(Scraper, LXMLMixin):\n CHAMBERS = {'Senator': 'upper', 'Representative': 'lower'}\n\n def scrape(self, session=None):\n if session is None:\n session = self.latest_session()\n year_slug = session[5:]\n\n # Load all members via the private API\n legislator_dump_url = (\n 'http://legislature.vermont.gov/people/loadAll/{}'.\n format(year_slug))\n json_data = self.get(legislator_dump_url).text\n legislators = json.loads(json_data)['data']\n\n # Parse the information from each legislator\n for info in legislators:\n # Strip whitespace from strings\n info = {k: v.strip() for k, v in info.items()}\n\n # Skip duplicate record for Christopher Mattos (appointed Rep September 2017)\n if info['PersonID'] == \"29034\":\n self.info(\"skipping first Christopher Mattos record\")\n continue\n\n # Gather photo URL from the member's page\n member_url = ('http://legislature.vermont.gov/people/single/{}/{}'.\n format(year_slug, info['PersonID']))\n page = self.lxmlize(member_url)\n (photo_url, ) = page.xpath('//img[@class=\"profile-photo\"]/@src')\n\n # Also grab their state email address\n state_email = page.xpath(\n '//dl[@class=\"summary-table profile-summary\"]/'\n 'dt[text()=\"Email\"]/following-sibling::dd[1]/a/text()')\n if state_email:\n (state_email, ) = state_email\n else:\n state_email = None\n\n district = info['District'].replace(\" District\", \"\")\n if district == 'Grand Isle':\n district = 'Chittenden-Grand Isle'\n\n leg = Person(\n primary_org=self.CHAMBERS[info['Title']],\n district=district,\n party=info['Party'].replace(\"Democrat\", \"Democratic\"),\n name=\"{0} {1}\".format(info['FirstName'], info['LastName']),\n image=photo_url\n )\n\n leg.add_contact_detail(\n note=\"Capitol Office\",\n type='address',\n value='Vermont State House\\n115 State Street\\nMontpelier, VT 05633'\n )\n if state_email:\n leg.add_contact_detail(note=\"Capitol Office\", type='email', value=state_email)\n\n leg.add_contact_detail(\n note=\"District Office\",\n type='address',\n value=\"{0}{1}\\n{2}, {3} {4}\".format(\n info['MailingAddress1'],\n (\"\\n\" + info['MailingAddress2']\n if info['MailingAddress2'].strip()\n else \"\"),\n info['MailingCity'],\n info['MailingState'],\n info['MailingZIP']\n )\n )\n if info['HomePhone']:\n leg.add_contact_detail(note=\"District Office\", type='voice',\n value=info['HomePhone'])\n district_email = info['Email'] or info['HomeEmail'] or info['WorkEmail']\n if district_email:\n leg.add_contact_detail(note=\"District Office\", type='email', value=district_email)\n\n leg.add_link(member_url)\n\n leg.add_source(legislator_dump_url)\n leg.add_source(member_url)\n\n yield leg\n", "path": "openstates/vt/people.py"}]}
2,732
167
gh_patches_debug_12732
rasdani/github-patches
git_diff
acl-org__acl-anthology-238
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Authors with only one name PR #233 tries adding a name variant with only a last name and there are some technical issues there, but there is also the more basic issue of: - Should the XML distinguish between an author with only a first name vs. an author with only a last name? I'm unsure whether this is a real distinction. * Only first name: Chandrahas. * Only last name: Abhimanue, Budiono, Govind, Mausam, Odbal, Shweta, Srinivas, Sukhada, Toni. * Neither: Yes, there are several in ROCLING; presumably these should be deleted. - What about corporate authors like: * ARC A3 * Annotation Group * Architecture Committee * European Association for Machine Translation (maybe shouldn't be an author anyway) * Forschungsgruppe LIMAS * PLUM Group * SRA International * TIPSTER SE --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bin/anthology/people.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright 2019 Marcel Bollmann <[email protected]> 4 # 5 # Licensed under the Apache License, Version 2.0 (the "License"); 6 # you may not use this file except in compliance with the License. 7 # You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, software 12 # distributed under the License is distributed on an "AS IS" BASIS, 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License. 16 17 from collections import defaultdict, Counter 18 from slugify import slugify 19 import logging as log 20 import yaml 21 from .formatter import bibtex_encode 22 from .venues import VenueIndex 23 24 try: 25 from yaml import CLoader as Loader 26 except ImportError: 27 from yaml import Loader 28 29 30 class PersonName: 31 first, last = "", "" 32 33 def __init__(self, first, last): 34 self.first = first.strip() 35 self.last = last.strip() 36 37 def from_element(person_element): 38 first, last = "", "" 39 for element in person_element: 40 tag = element.tag 41 # These are guaranteed to occur at most once by the schema 42 if tag == "first": 43 first = element.text or "" 44 elif tag == "last": 45 last = element.text or "" 46 return PersonName(first, last) 47 48 def from_repr(repr_): 49 parts = repr_.split(" || ") 50 if len(parts) > 1: 51 first, last = parts[0], parts[1] 52 else: 53 first, last = "", parts[0] 54 return PersonName(first, last) 55 56 def from_dict(dict_): 57 first = dict_.get("first", "") 58 last = dict_["last"] 59 return PersonName(first, last) 60 61 @property 62 def full(self): 63 return "{} {}".format(self.first, self.last).strip() 64 65 @property 66 def id_(self): 67 return repr(self) 68 69 def as_bibtex(self): 70 return bibtex_encode("{}, {}".format(self.last, self.first)) 71 72 def as_dict(self): 73 return {"first": self.first, "last": self.last, "full": self.full} 74 75 def __eq__(self, other): 76 return (self.first == other.first) and (self.last == other.last) 77 78 def __str__(self): 79 return self.full 80 81 def __repr__(self): 82 if self.first: 83 return "{} || {}".format(self.first, self.last) 84 else: 85 return self.last 86 87 def __hash__(self): 88 return hash(repr(self)) 89 90 91 class PersonIndex: 92 """Keeps an index of persons and their associated papers.""" 93 94 def __init__(self, srcdir=None): 95 self.canonical = defaultdict(list) # maps canonical names to variants 96 self.variants = {} # maps variant names to canonical names 97 self._all_slugs = set([""]) 98 self.slugs = {} # maps names to unique slugs 99 self.coauthors = defaultdict(Counter) # maps names to co-author names 100 self.papers = defaultdict(lambda: defaultdict(list)) 101 if srcdir is not None: 102 self.load_variant_list(srcdir) 103 104 def load_variant_list(self, directory): 105 with open("{}/yaml/name_variants.yaml".format(directory), "r") as f: 106 name_list = yaml.load(f, Loader=Loader) 107 for entry in name_list: 108 try: 109 canonical = entry["canonical"] 110 variants = entry["variants"] 111 except (KeyError, TypeError): 112 log.error("Couldn't parse name variant entry: {}".format(entry)) 113 continue 114 canonical = PersonName.from_dict(canonical) 115 _ = self.papers[canonical] # insert empty entry for canonical if not present 116 for variant in variants: 117 variant = PersonName.from_dict(variant) 118 if variant in self.variants: 119 log.error( 120 "Tried to add '{}' as variant of '{}', but is already a variant of '{}'".format( 121 repr(variant), 122 repr(canonical), 123 repr(self.variants[variant]), 124 ) 125 ) 126 continue 127 self.variants[variant] = canonical 128 self.canonical[canonical].append(variant) 129 130 def register(self, paper): 131 """Register all names associated with the given paper.""" 132 from .papers import Paper 133 134 assert isinstance(paper, Paper), "Expected Paper, got {} ({})".format( 135 type(paper), repr(paper) 136 ) 137 for role in ("author", "editor"): 138 for name in paper.get(role, []): 139 # Register paper 140 self.papers[name][role].append(paper.full_id) 141 # Make sure canonical names are prioritized for slugs 142 if self.is_canonical(name): 143 self.get_slug(name) 144 # Register co-author(s) 145 for author in paper.get(role): 146 if author != name: 147 self.coauthors[name][author] += 1 148 149 def names(self): 150 return self.papers.keys() 151 152 def __len__(self): 153 return len(self.papers) 154 155 def is_canonical(self, name): 156 return name not in self.variants 157 158 def has_variants(self, name): 159 return name in self.canonical 160 161 def get_canonical_variant(self, name): 162 """Maps a name to its canonical variant.""" 163 return self.variants.get(name, name) 164 165 def get_all_variants(self, name): 166 """Return a list of all variants for a given name. 167 168 Includes the supplied name itself. 169 """ 170 if not self.is_canonical(name): 171 name = self.get_canonical_variant(name) 172 return self.canonical[name] + [name] 173 174 def get_registered_variants(self, name): 175 """Return a list of variants for a given name that are actually 176 associated with papers. 177 178 Will only return true variants, not including the canonical name. 179 """ 180 if not self.is_canonical(name): 181 name = self.get_canonical_variant(name) 182 return [n for n in self.canonical[name] if n in self.papers] 183 184 def get_slug(self, name): 185 if name in self.slugs: 186 return self.slugs[name] 187 slug, i = slugify(repr(name)), 0 188 while slug in self._all_slugs: 189 i += 1 190 slug = "{}{}".format(slugify(repr(name)), i) 191 self._all_slugs.add(slug) 192 self.slugs[name] = slug 193 return slug 194 195 def get_papers(self, name, role=None, include_variants=False): 196 if include_variants: 197 return [ 198 p 199 for n in self.get_all_variants(name) 200 for p in self.get_papers(n, role=role) 201 ] 202 if role is None: 203 return [p for p_list in self.papers[name].values() for p in p_list] 204 return self.papers[name][role] 205 206 def get_coauthors(self, name, include_variants=False): 207 if include_variants: 208 return [ 209 p for n in self.get_all_variants(name) for p in self.get_coauthors(n) 210 ] 211 return self.coauthors[name].items() 212 213 def get_venues(self, vidx: VenueIndex, name, include_variants=False): 214 """Get a list of venues a person has published in, with counts.""" 215 venues = Counter() 216 if include_variants: 217 for n in self.get_all_variants(name): 218 venues.update(self.get_venues(vidx, n)) 219 else: 220 for paper in self.get_papers(name): 221 for venue in vidx.get_associated_venues(paper): 222 venues[venue] += 1 223 return venues 224 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bin/anthology/people.py b/bin/anthology/people.py --- a/bin/anthology/people.py +++ b/bin/anthology/people.py @@ -115,6 +115,7 @@ _ = self.papers[canonical] # insert empty entry for canonical if not present for variant in variants: variant = PersonName.from_dict(variant) + _ = self.papers[variant] # insert empty entry if not present if variant in self.variants: log.error( "Tried to add '{}' as variant of '{}', but is already a variant of '{}'".format(
{"golden_diff": "diff --git a/bin/anthology/people.py b/bin/anthology/people.py\n--- a/bin/anthology/people.py\n+++ b/bin/anthology/people.py\n@@ -115,6 +115,7 @@\n _ = self.papers[canonical] # insert empty entry for canonical if not present\n for variant in variants:\n variant = PersonName.from_dict(variant)\n+ _ = self.papers[variant] # insert empty entry if not present\n if variant in self.variants:\n log.error(\n \"Tried to add '{}' as variant of '{}', but is already a variant of '{}'\".format(\n", "issue": "Authors with only one name\nPR #233 tries adding a name variant with only a last name and there are some technical issues there, but there is also the more basic issue of:\r\n\r\n- Should the XML distinguish between an author with only a first name vs. an author with only a last name? I'm unsure whether this is a real distinction. \r\n* Only first name: Chandrahas.\r\n* Only last name: Abhimanue, Budiono, Govind, Mausam, Odbal, Shweta, Srinivas, Sukhada, Toni.\r\n* Neither: Yes, there are several in ROCLING; presumably these should be deleted.\r\n\r\n- What about corporate authors like:\r\n* ARC A3\r\n* Annotation Group\r\n* Architecture Committee\r\n* European Association for Machine Translation (maybe shouldn't be an author anyway)\r\n* Forschungsgruppe LIMAS\r\n* PLUM Group\r\n* SRA International\r\n* TIPSTER SE\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import defaultdict, Counter\nfrom slugify import slugify\nimport logging as log\nimport yaml\nfrom .formatter import bibtex_encode\nfrom .venues import VenueIndex\n\ntry:\n from yaml import CLoader as Loader\nexcept ImportError:\n from yaml import Loader\n\n\nclass PersonName:\n first, last = \"\", \"\"\n\n def __init__(self, first, last):\n self.first = first.strip()\n self.last = last.strip()\n\n def from_element(person_element):\n first, last = \"\", \"\"\n for element in person_element:\n tag = element.tag\n # These are guaranteed to occur at most once by the schema\n if tag == \"first\":\n first = element.text or \"\"\n elif tag == \"last\":\n last = element.text or \"\"\n return PersonName(first, last)\n\n def from_repr(repr_):\n parts = repr_.split(\" || \")\n if len(parts) > 1:\n first, last = parts[0], parts[1]\n else:\n first, last = \"\", parts[0]\n return PersonName(first, last)\n\n def from_dict(dict_):\n first = dict_.get(\"first\", \"\")\n last = dict_[\"last\"]\n return PersonName(first, last)\n\n @property\n def full(self):\n return \"{} {}\".format(self.first, self.last).strip()\n\n @property\n def id_(self):\n return repr(self)\n\n def as_bibtex(self):\n return bibtex_encode(\"{}, {}\".format(self.last, self.first))\n\n def as_dict(self):\n return {\"first\": self.first, \"last\": self.last, \"full\": self.full}\n\n def __eq__(self, other):\n return (self.first == other.first) and (self.last == other.last)\n\n def __str__(self):\n return self.full\n\n def __repr__(self):\n if self.first:\n return \"{} || {}\".format(self.first, self.last)\n else:\n return self.last\n\n def __hash__(self):\n return hash(repr(self))\n\n\nclass PersonIndex:\n \"\"\"Keeps an index of persons and their associated papers.\"\"\"\n\n def __init__(self, srcdir=None):\n self.canonical = defaultdict(list) # maps canonical names to variants\n self.variants = {} # maps variant names to canonical names\n self._all_slugs = set([\"\"])\n self.slugs = {} # maps names to unique slugs\n self.coauthors = defaultdict(Counter) # maps names to co-author names\n self.papers = defaultdict(lambda: defaultdict(list))\n if srcdir is not None:\n self.load_variant_list(srcdir)\n\n def load_variant_list(self, directory):\n with open(\"{}/yaml/name_variants.yaml\".format(directory), \"r\") as f:\n name_list = yaml.load(f, Loader=Loader)\n for entry in name_list:\n try:\n canonical = entry[\"canonical\"]\n variants = entry[\"variants\"]\n except (KeyError, TypeError):\n log.error(\"Couldn't parse name variant entry: {}\".format(entry))\n continue\n canonical = PersonName.from_dict(canonical)\n _ = self.papers[canonical] # insert empty entry for canonical if not present\n for variant in variants:\n variant = PersonName.from_dict(variant)\n if variant in self.variants:\n log.error(\n \"Tried to add '{}' as variant of '{}', but is already a variant of '{}'\".format(\n repr(variant),\n repr(canonical),\n repr(self.variants[variant]),\n )\n )\n continue\n self.variants[variant] = canonical\n self.canonical[canonical].append(variant)\n\n def register(self, paper):\n \"\"\"Register all names associated with the given paper.\"\"\"\n from .papers import Paper\n\n assert isinstance(paper, Paper), \"Expected Paper, got {} ({})\".format(\n type(paper), repr(paper)\n )\n for role in (\"author\", \"editor\"):\n for name in paper.get(role, []):\n # Register paper\n self.papers[name][role].append(paper.full_id)\n # Make sure canonical names are prioritized for slugs\n if self.is_canonical(name):\n self.get_slug(name)\n # Register co-author(s)\n for author in paper.get(role):\n if author != name:\n self.coauthors[name][author] += 1\n\n def names(self):\n return self.papers.keys()\n\n def __len__(self):\n return len(self.papers)\n\n def is_canonical(self, name):\n return name not in self.variants\n\n def has_variants(self, name):\n return name in self.canonical\n\n def get_canonical_variant(self, name):\n \"\"\"Maps a name to its canonical variant.\"\"\"\n return self.variants.get(name, name)\n\n def get_all_variants(self, name):\n \"\"\"Return a list of all variants for a given name.\n\n Includes the supplied name itself.\n \"\"\"\n if not self.is_canonical(name):\n name = self.get_canonical_variant(name)\n return self.canonical[name] + [name]\n\n def get_registered_variants(self, name):\n \"\"\"Return a list of variants for a given name that are actually\n associated with papers.\n\n Will only return true variants, not including the canonical name.\n \"\"\"\n if not self.is_canonical(name):\n name = self.get_canonical_variant(name)\n return [n for n in self.canonical[name] if n in self.papers]\n\n def get_slug(self, name):\n if name in self.slugs:\n return self.slugs[name]\n slug, i = slugify(repr(name)), 0\n while slug in self._all_slugs:\n i += 1\n slug = \"{}{}\".format(slugify(repr(name)), i)\n self._all_slugs.add(slug)\n self.slugs[name] = slug\n return slug\n\n def get_papers(self, name, role=None, include_variants=False):\n if include_variants:\n return [\n p\n for n in self.get_all_variants(name)\n for p in self.get_papers(n, role=role)\n ]\n if role is None:\n return [p for p_list in self.papers[name].values() for p in p_list]\n return self.papers[name][role]\n\n def get_coauthors(self, name, include_variants=False):\n if include_variants:\n return [\n p for n in self.get_all_variants(name) for p in self.get_coauthors(n)\n ]\n return self.coauthors[name].items()\n\n def get_venues(self, vidx: VenueIndex, name, include_variants=False):\n \"\"\"Get a list of venues a person has published in, with counts.\"\"\"\n venues = Counter()\n if include_variants:\n for n in self.get_all_variants(name):\n venues.update(self.get_venues(vidx, n))\n else:\n for paper in self.get_papers(name):\n for venue in vidx.get_associated_venues(paper):\n venues[venue] += 1\n return venues\n", "path": "bin/anthology/people.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import defaultdict, Counter\nfrom slugify import slugify\nimport logging as log\nimport yaml\nfrom .formatter import bibtex_encode\nfrom .venues import VenueIndex\n\ntry:\n from yaml import CLoader as Loader\nexcept ImportError:\n from yaml import Loader\n\n\nclass PersonName:\n first, last = \"\", \"\"\n\n def __init__(self, first, last):\n self.first = first.strip()\n self.last = last.strip()\n\n def from_element(person_element):\n first, last = \"\", \"\"\n for element in person_element:\n tag = element.tag\n # These are guaranteed to occur at most once by the schema\n if tag == \"first\":\n first = element.text or \"\"\n elif tag == \"last\":\n last = element.text or \"\"\n return PersonName(first, last)\n\n def from_repr(repr_):\n parts = repr_.split(\" || \")\n if len(parts) > 1:\n first, last = parts[0], parts[1]\n else:\n first, last = \"\", parts[0]\n return PersonName(first, last)\n\n def from_dict(dict_):\n first = dict_.get(\"first\", \"\")\n last = dict_[\"last\"]\n return PersonName(first, last)\n\n @property\n def full(self):\n return \"{} {}\".format(self.first, self.last).strip()\n\n @property\n def id_(self):\n return repr(self)\n\n def as_bibtex(self):\n return bibtex_encode(\"{}, {}\".format(self.last, self.first))\n\n def as_dict(self):\n return {\"first\": self.first, \"last\": self.last, \"full\": self.full}\n\n def __eq__(self, other):\n return (self.first == other.first) and (self.last == other.last)\n\n def __str__(self):\n return self.full\n\n def __repr__(self):\n if self.first:\n return \"{} || {}\".format(self.first, self.last)\n else:\n return self.last\n\n def __hash__(self):\n return hash(repr(self))\n\n\nclass PersonIndex:\n \"\"\"Keeps an index of persons and their associated papers.\"\"\"\n\n def __init__(self, srcdir=None):\n self.canonical = defaultdict(list) # maps canonical names to variants\n self.variants = {} # maps variant names to canonical names\n self._all_slugs = set([\"\"])\n self.slugs = {} # maps names to unique slugs\n self.coauthors = defaultdict(Counter) # maps names to co-author names\n self.papers = defaultdict(lambda: defaultdict(list))\n if srcdir is not None:\n self.load_variant_list(srcdir)\n\n def load_variant_list(self, directory):\n with open(\"{}/yaml/name_variants.yaml\".format(directory), \"r\") as f:\n name_list = yaml.load(f, Loader=Loader)\n for entry in name_list:\n try:\n canonical = entry[\"canonical\"]\n variants = entry[\"variants\"]\n except (KeyError, TypeError):\n log.error(\"Couldn't parse name variant entry: {}\".format(entry))\n continue\n canonical = PersonName.from_dict(canonical)\n _ = self.papers[canonical] # insert empty entry for canonical if not present\n for variant in variants:\n variant = PersonName.from_dict(variant)\n _ = self.papers[variant] # insert empty entry if not present\n if variant in self.variants:\n log.error(\n \"Tried to add '{}' as variant of '{}', but is already a variant of '{}'\".format(\n repr(variant),\n repr(canonical),\n repr(self.variants[variant]),\n )\n )\n continue\n self.variants[variant] = canonical\n self.canonical[canonical].append(variant)\n\n def register(self, paper):\n \"\"\"Register all names associated with the given paper.\"\"\"\n from .papers import Paper\n\n assert isinstance(paper, Paper), \"Expected Paper, got {} ({})\".format(\n type(paper), repr(paper)\n )\n for role in (\"author\", \"editor\"):\n for name in paper.get(role, []):\n # Register paper\n self.papers[name][role].append(paper.full_id)\n # Make sure canonical names are prioritized for slugs\n if self.is_canonical(name):\n self.get_slug(name)\n # Register co-author(s)\n for author in paper.get(role):\n if author != name:\n self.coauthors[name][author] += 1\n\n def names(self):\n return self.papers.keys()\n\n def __len__(self):\n return len(self.papers)\n\n def is_canonical(self, name):\n return name not in self.variants\n\n def has_variants(self, name):\n return name in self.canonical\n\n def get_canonical_variant(self, name):\n \"\"\"Maps a name to its canonical variant.\"\"\"\n return self.variants.get(name, name)\n\n def get_all_variants(self, name):\n \"\"\"Return a list of all variants for a given name.\n\n Includes the supplied name itself.\n \"\"\"\n if not self.is_canonical(name):\n name = self.get_canonical_variant(name)\n return self.canonical[name] + [name]\n\n def get_registered_variants(self, name):\n \"\"\"Return a list of variants for a given name that are actually\n associated with papers.\n\n Will only return true variants, not including the canonical name.\n \"\"\"\n if not self.is_canonical(name):\n name = self.get_canonical_variant(name)\n return [n for n in self.canonical[name] if n in self.papers]\n\n def get_slug(self, name):\n if name in self.slugs:\n return self.slugs[name]\n slug, i = slugify(repr(name)), 0\n while slug in self._all_slugs:\n i += 1\n slug = \"{}{}\".format(slugify(repr(name)), i)\n self._all_slugs.add(slug)\n self.slugs[name] = slug\n return slug\n\n def get_papers(self, name, role=None, include_variants=False):\n if include_variants:\n return [\n p\n for n in self.get_all_variants(name)\n for p in self.get_papers(n, role=role)\n ]\n if role is None:\n return [p for p_list in self.papers[name].values() for p in p_list]\n return self.papers[name][role]\n\n def get_coauthors(self, name, include_variants=False):\n if include_variants:\n return [\n p for n in self.get_all_variants(name) for p in self.get_coauthors(n)\n ]\n return self.coauthors[name].items()\n\n def get_venues(self, vidx: VenueIndex, name, include_variants=False):\n \"\"\"Get a list of venues a person has published in, with counts.\"\"\"\n venues = Counter()\n if include_variants:\n for n in self.get_all_variants(name):\n venues.update(self.get_venues(vidx, n))\n else:\n for paper in self.get_papers(name):\n for venue in vidx.get_associated_venues(paper):\n venues[venue] += 1\n return venues\n", "path": "bin/anthology/people.py"}]}
2,727
137
gh_patches_debug_25691
rasdani/github-patches
git_diff
microsoft__DeepSpeed-5342
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] Order of partitioning in universal checkpoint **Describe the bug** A clear and concise description of what the bug is. Background: Automatic adjustment of ZeRO's optimizer state partitioning with a new world size is not currently supported. Issues: I use the code from PR #4516 to convert a DeepSpeed checkpoint without TP and PP (128 ranks) to another DeepSpeed checkpoint (32 ranks). In my testing (ZeRO2), I've found that merging in order isn't particularly correct. ``` def check_mp_equal_to_fp32(args): output_folder = "./output" mp_sd = torch.load( os.path.join(output_folder, "output", "mp_rank_00_model_states.pt"), map_location=torch.device("cpu"), ) zero_output_folder = os.path.join(output_folder, "zero") tensor_name_paths = sorted(glob.glob(f"{zero_output_folder}/*")) for tensor_name_path in tensor_name_paths: if "model" not in tensor_name_path: continue tensor_name = os.path.basename(tensor_name_path) fp32 = torch.load(os.path.join(tensor_name_path, "fp32.pt"))["param"].to(mp_sd["module"][tensor_name]) torch.testing.assert_allclose(fp32, mp_sd["module"][tensor_name], msg=f"{tensor_name}, fp32: \n{fp32}, mp_sd: \n{mp_sd['module'][tensor_name]}") ``` In this example, I found that it wasn't the alphabetical order or number order. **Expected behavior** We need to get the linked matrix for partitioning, so that we can get the correct order. Maybe something look like this: ```python def _build_linked_matrix(args: argparse.Namespace, temp_dir: str, mp_sd: dict): linked_matrix = defaultdict(list) reversed_linked_matrix = defaultdict(list) for tensor_name, mp_tensor in mp_sd["module"].items(): fp32_paths = sorted(glob.glob(f"{temp_dir}/{tensor_name}/0/fp32.*")) fp32_tensors = [] for fp32_path in fp32_paths: partition_idx = fp32_path.split("/")[-1].split(".")[-1] fp32_tensor = torch.load(os.path.join(fp32_path)) fp32_tensors.append((partition_idx, fp32_tensor)) flat_tensor = mp_tensor.flatten() visited = [False for _ in range(len(fp32_tensors))] offset = 0 for merge_cnt in range(len(fp32_tensors)): for index, (partition_idx, fp32_tensor) in enumerate(fp32_tensors): numel = fp32_tensor.numel() if visited[index] is False: cloned_flat_tensor = copy.deepcopy(flat_tensor) cloned_flat_tensor[offset : offset + numel] = fp32_tensor.to( cloned_flat_tensor ) if torch.allclose(flat_tensor, cloned_flat_tensor): visited[index] = True # tensor_name -> [(partition_idx, numel)] in temp linked_matrix[tensor_name].append((partition_idx, numel)) # fp32.idx -> [(tensor_name, offset, numel)] # it is ordered because each time we merge one partition. reversed_linked_matrix[partition_idx].append( (tensor_name, offset, numel) ) offset += numel print(tensor_name, linked_matrix[tensor_name], mp_tensor.shape) # linked_matrix["model.transformer_encoder.layers.31.linear1.weight"] # Out: # [('018' 54715476), ('019', 12393388)] # reverse_linked_matrix['019'] # Out: # [('model.transformer_encoder.layers.31.linear1.weight', 54715476, 12393388), # ('model.transformer_encoder.layers.31.linear1.bias', 0, 16384), # ('model.transformer_encoder.layers.31.linear2.weight', 0, 46882008)] # => 54715476 is the offset of 12393388 elements in layers.31.linear1.weight print(linked_matrix, reversed_linked_matrix) torch.save( (linked_matrix, reversed_linked_matrix), os.path.join(args.input_folder, "linked_matrix.pt"), ) return linked_matrix, reversed_linked_matrix ``` **System info (please complete the following information):** - OS: Ubuntu 22.04 - GPU count and types: A100 - Python version: 3.10 - Any other relevant info about your setup --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `deepspeed/checkpoint/reshape_utils.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. 2 # SPDX-License-Identifier: Apache-2.0 3 4 # DeepSpeed Team 5 6 import os 7 import torch 8 from collections import OrderedDict 9 from .constants import (ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX) 10 11 12 def basic_folder_validation(dir): 13 assert os.path.exists(dir), f'{dir} path does not exist' 14 assert os.path.isdir(dir), f'{dir} is not a folder' 15 16 17 def get_files_with_prefix(all_files, prefix): 18 file_list = [] 19 for file_path in all_files: 20 _, fname = os.path.split(file_path) 21 if fname.startswith(prefix): 22 file_list.append(file_path) 23 24 return sorted(file_list) 25 26 27 def validate_files(file_list): 28 for file in file_list: 29 if not os.path.isfile(file): 30 print(f'Error: {file} is not existent') 31 32 33 def get_files(dir): 34 file_list = [] 35 for root, _, files in os.walk(dir): 36 for file in files: 37 file_list.append(os.path.join(root, file)) 38 return file_list 39 40 41 def get_zero_files(dir): 42 file_list = get_files(dir) 43 for prefix in [ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX]: 44 zero_files = get_files_with_prefix(file_list, prefix) 45 if len(zero_files) > 0: 46 return zero_files 47 48 return [] 49 50 51 def partition_data(data_list, num_partitions): 52 num_elems = len(data_list) 53 assert num_elems % num_partitions == 0 54 partition_size = num_elems // num_partitions 55 partitions_list = [data_list[i:i + partition_size] for i in range(0, num_elems, partition_size)] 56 return partitions_list 57 58 59 def _key_list_to_string(key_list): 60 return '.'.join(key_list) 61 62 63 def merge_state_dict(dict_a, dict_b, key_list): 64 merged_dict = type(dict_a)({}) 65 66 for key, value in dict_b.items(): 67 if key in dict_a.keys(): 68 merged_dict[key] = merge_state(dict_a[key], dict_b[key], [str(key)]) 69 else: 70 merged_dict[key] = value 71 72 return merged_dict 73 74 75 def merge_state_list(list_a, list_b, key_list): 76 if len(list_a) != len(list_b): 77 print(f'{_key_list_to_string(key_list)}') 78 raise ValueError(f'Cannot merge lists of different lengths, a = {len(list_a)} b = {len(list_b)}') 79 80 return [merge_state(a, b, key_list) for a, b in zip(list_a, list_b)] 81 82 83 def merge_state(state_a, state_b, key_list=[]): 84 if type(state_a) != type(state_b): 85 key_list_string = _key_list_to_string(key_list) 86 print(f'key_list = {key_list_string}') 87 raise ValueError(f'Cannot merge two states of types {type(state_a)} and type {type(state_b)}') 88 89 if type(state_a) in (dict, OrderedDict): 90 return merge_state_dict(state_a, state_b, key_list) 91 elif type(state_a) in (list, tuple): 92 return type(state_a)(merge_state_list(state_a, state_b, key_list)) 93 elif torch.is_tensor(state_a): 94 return torch.cat([state_a, state_b], 0) 95 else: 96 return state_a 97 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/deepspeed/checkpoint/reshape_utils.py b/deepspeed/checkpoint/reshape_utils.py --- a/deepspeed/checkpoint/reshape_utils.py +++ b/deepspeed/checkpoint/reshape_utils.py @@ -4,9 +4,10 @@ # DeepSpeed Team import os +import re import torch from collections import OrderedDict -from .constants import (ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX) +from .constants import (ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX, MODEL_FILE_PREFIX) def basic_folder_validation(dir): @@ -38,12 +39,28 @@ return file_list +def sort_zero_files(files, prefix): + pattern = f"{prefix}([0-9]+)_{MODEL_FILE_PREFIX}([0-9]+)" + rank_pairs = [] + for f in files: + m = re.search(pattern, f) + if m: + dp_rank = int(m.group(1)) + mp_rank = int(m.group(2)) + rank_pairs.append((dp_rank, mp_rank, f)) + else: + raise ValueError(f"Cannot parse dp_rank and mp_rank from {f}") + + sorted_files = sorted(rank_pairs, key=lambda x: (x[0], x[1])) + return [f for _, _, f in sorted_files] + + def get_zero_files(dir): file_list = get_files(dir) for prefix in [ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX]: zero_files = get_files_with_prefix(file_list, prefix) if len(zero_files) > 0: - return zero_files + return sort_zero_files(zero_files, prefix) return []
{"golden_diff": "diff --git a/deepspeed/checkpoint/reshape_utils.py b/deepspeed/checkpoint/reshape_utils.py\n--- a/deepspeed/checkpoint/reshape_utils.py\n+++ b/deepspeed/checkpoint/reshape_utils.py\n@@ -4,9 +4,10 @@\n # DeepSpeed Team\n \n import os\n+import re\n import torch\n from collections import OrderedDict\n-from .constants import (ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX)\n+from .constants import (ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX, MODEL_FILE_PREFIX)\n \n \n def basic_folder_validation(dir):\n@@ -38,12 +39,28 @@\n return file_list\n \n \n+def sort_zero_files(files, prefix):\n+ pattern = f\"{prefix}([0-9]+)_{MODEL_FILE_PREFIX}([0-9]+)\"\n+ rank_pairs = []\n+ for f in files:\n+ m = re.search(pattern, f)\n+ if m:\n+ dp_rank = int(m.group(1))\n+ mp_rank = int(m.group(2))\n+ rank_pairs.append((dp_rank, mp_rank, f))\n+ else:\n+ raise ValueError(f\"Cannot parse dp_rank and mp_rank from {f}\")\n+\n+ sorted_files = sorted(rank_pairs, key=lambda x: (x[0], x[1]))\n+ return [f for _, _, f in sorted_files]\n+\n+\n def get_zero_files(dir):\n file_list = get_files(dir)\n for prefix in [ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX]:\n zero_files = get_files_with_prefix(file_list, prefix)\n if len(zero_files) > 0:\n- return zero_files\n+ return sort_zero_files(zero_files, prefix)\n \n return []\n", "issue": "[BUG] Order of partitioning in universal checkpoint\n**Describe the bug**\r\nA clear and concise description of what the bug is.\r\n\r\nBackground:\r\nAutomatic adjustment of ZeRO's optimizer state partitioning with a new world size is not currently supported.\r\n\r\nIssues:\r\nI use the code from PR #4516 to convert a DeepSpeed checkpoint without TP and PP (128 ranks) to another DeepSpeed checkpoint (32 ranks). In my testing (ZeRO2), I've found that merging in order isn't particularly correct.\r\n\r\n```\r\ndef check_mp_equal_to_fp32(args):\r\n output_folder = \"./output\"\r\n\r\n mp_sd = torch.load(\r\n os.path.join(output_folder, \"output\", \"mp_rank_00_model_states.pt\"),\r\n map_location=torch.device(\"cpu\"),\r\n )\r\n zero_output_folder = os.path.join(output_folder, \"zero\")\r\n tensor_name_paths = sorted(glob.glob(f\"{zero_output_folder}/*\"))\r\n for tensor_name_path in tensor_name_paths:\r\n if \"model\" not in tensor_name_path:\r\n continue\r\n tensor_name = os.path.basename(tensor_name_path)\r\n fp32 = torch.load(os.path.join(tensor_name_path, \"fp32.pt\"))[\"param\"].to(mp_sd[\"module\"][tensor_name])\r\n torch.testing.assert_allclose(fp32, mp_sd[\"module\"][tensor_name], msg=f\"{tensor_name}, fp32: \\n{fp32}, mp_sd: \\n{mp_sd['module'][tensor_name]}\")\r\n```\r\n\r\nIn this example, I found that it wasn't the alphabetical order or number order.\r\n\r\n\r\n**Expected behavior**\r\nWe need to get the linked matrix for partitioning, so that we can get the correct order. Maybe something look like this:\r\n\r\n\r\n```python\r\ndef _build_linked_matrix(args: argparse.Namespace, temp_dir: str, mp_sd: dict):\r\n linked_matrix = defaultdict(list)\r\n reversed_linked_matrix = defaultdict(list)\r\n\r\n for tensor_name, mp_tensor in mp_sd[\"module\"].items():\r\n fp32_paths = sorted(glob.glob(f\"{temp_dir}/{tensor_name}/0/fp32.*\"))\r\n fp32_tensors = []\r\n for fp32_path in fp32_paths:\r\n partition_idx = fp32_path.split(\"/\")[-1].split(\".\")[-1]\r\n fp32_tensor = torch.load(os.path.join(fp32_path))\r\n fp32_tensors.append((partition_idx, fp32_tensor))\r\n\r\n flat_tensor = mp_tensor.flatten()\r\n visited = [False for _ in range(len(fp32_tensors))]\r\n offset = 0\r\n for merge_cnt in range(len(fp32_tensors)):\r\n for index, (partition_idx, fp32_tensor) in enumerate(fp32_tensors):\r\n numel = fp32_tensor.numel()\r\n if visited[index] is False:\r\n cloned_flat_tensor = copy.deepcopy(flat_tensor)\r\n cloned_flat_tensor[offset : offset + numel] = fp32_tensor.to(\r\n cloned_flat_tensor\r\n )\r\n if torch.allclose(flat_tensor, cloned_flat_tensor):\r\n visited[index] = True\r\n # tensor_name -> [(partition_idx, numel)] in temp\r\n linked_matrix[tensor_name].append((partition_idx, numel))\r\n # fp32.idx -> [(tensor_name, offset, numel)]\r\n # it is ordered because each time we merge one partition.\r\n reversed_linked_matrix[partition_idx].append(\r\n (tensor_name, offset, numel)\r\n )\r\n offset += numel\r\n print(tensor_name, linked_matrix[tensor_name], mp_tensor.shape)\r\n\r\n # linked_matrix[\"model.transformer_encoder.layers.31.linear1.weight\"]\r\n # Out:\r\n # [('018' 54715476), ('019', 12393388)]\r\n\r\n # reverse_linked_matrix['019']\r\n # Out:\r\n # [('model.transformer_encoder.layers.31.linear1.weight', 54715476, 12393388),\r\n # ('model.transformer_encoder.layers.31.linear1.bias', 0, 16384),\r\n # ('model.transformer_encoder.layers.31.linear2.weight', 0, 46882008)]\r\n # => 54715476 is the offset of 12393388 elements in layers.31.linear1.weight\r\n print(linked_matrix, reversed_linked_matrix)\r\n torch.save(\r\n (linked_matrix, reversed_linked_matrix),\r\n os.path.join(args.input_folder, \"linked_matrix.pt\"),\r\n )\r\n return linked_matrix, reversed_linked_matrix\r\n```\r\n\r\n**System info (please complete the following information):**\r\n - OS: Ubuntu 22.04\r\n - GPU count and types: A100\r\n - Python version: 3.10\r\n - Any other relevant info about your setup\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nimport os\nimport torch\nfrom collections import OrderedDict\nfrom .constants import (ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX)\n\n\ndef basic_folder_validation(dir):\n assert os.path.exists(dir), f'{dir} path does not exist'\n assert os.path.isdir(dir), f'{dir} is not a folder'\n\n\ndef get_files_with_prefix(all_files, prefix):\n file_list = []\n for file_path in all_files:\n _, fname = os.path.split(file_path)\n if fname.startswith(prefix):\n file_list.append(file_path)\n\n return sorted(file_list)\n\n\ndef validate_files(file_list):\n for file in file_list:\n if not os.path.isfile(file):\n print(f'Error: {file} is not existent')\n\n\ndef get_files(dir):\n file_list = []\n for root, _, files in os.walk(dir):\n for file in files:\n file_list.append(os.path.join(root, file))\n return file_list\n\n\ndef get_zero_files(dir):\n file_list = get_files(dir)\n for prefix in [ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX]:\n zero_files = get_files_with_prefix(file_list, prefix)\n if len(zero_files) > 0:\n return zero_files\n\n return []\n\n\ndef partition_data(data_list, num_partitions):\n num_elems = len(data_list)\n assert num_elems % num_partitions == 0\n partition_size = num_elems // num_partitions\n partitions_list = [data_list[i:i + partition_size] for i in range(0, num_elems, partition_size)]\n return partitions_list\n\n\ndef _key_list_to_string(key_list):\n return '.'.join(key_list)\n\n\ndef merge_state_dict(dict_a, dict_b, key_list):\n merged_dict = type(dict_a)({})\n\n for key, value in dict_b.items():\n if key in dict_a.keys():\n merged_dict[key] = merge_state(dict_a[key], dict_b[key], [str(key)])\n else:\n merged_dict[key] = value\n\n return merged_dict\n\n\ndef merge_state_list(list_a, list_b, key_list):\n if len(list_a) != len(list_b):\n print(f'{_key_list_to_string(key_list)}')\n raise ValueError(f'Cannot merge lists of different lengths, a = {len(list_a)} b = {len(list_b)}')\n\n return [merge_state(a, b, key_list) for a, b in zip(list_a, list_b)]\n\n\ndef merge_state(state_a, state_b, key_list=[]):\n if type(state_a) != type(state_b):\n key_list_string = _key_list_to_string(key_list)\n print(f'key_list = {key_list_string}')\n raise ValueError(f'Cannot merge two states of types {type(state_a)} and type {type(state_b)}')\n\n if type(state_a) in (dict, OrderedDict):\n return merge_state_dict(state_a, state_b, key_list)\n elif type(state_a) in (list, tuple):\n return type(state_a)(merge_state_list(state_a, state_b, key_list))\n elif torch.is_tensor(state_a):\n return torch.cat([state_a, state_b], 0)\n else:\n return state_a\n", "path": "deepspeed/checkpoint/reshape_utils.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nimport os\nimport re\nimport torch\nfrom collections import OrderedDict\nfrom .constants import (ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX, MODEL_FILE_PREFIX)\n\n\ndef basic_folder_validation(dir):\n assert os.path.exists(dir), f'{dir} path does not exist'\n assert os.path.isdir(dir), f'{dir} is not a folder'\n\n\ndef get_files_with_prefix(all_files, prefix):\n file_list = []\n for file_path in all_files:\n _, fname = os.path.split(file_path)\n if fname.startswith(prefix):\n file_list.append(file_path)\n\n return sorted(file_list)\n\n\ndef validate_files(file_list):\n for file in file_list:\n if not os.path.isfile(file):\n print(f'Error: {file} is not existent')\n\n\ndef get_files(dir):\n file_list = []\n for root, _, files in os.walk(dir):\n for file in files:\n file_list.append(os.path.join(root, file))\n return file_list\n\n\ndef sort_zero_files(files, prefix):\n pattern = f\"{prefix}([0-9]+)_{MODEL_FILE_PREFIX}([0-9]+)\"\n rank_pairs = []\n for f in files:\n m = re.search(pattern, f)\n if m:\n dp_rank = int(m.group(1))\n mp_rank = int(m.group(2))\n rank_pairs.append((dp_rank, mp_rank, f))\n else:\n raise ValueError(f\"Cannot parse dp_rank and mp_rank from {f}\")\n\n sorted_files = sorted(rank_pairs, key=lambda x: (x[0], x[1]))\n return [f for _, _, f in sorted_files]\n\n\ndef get_zero_files(dir):\n file_list = get_files(dir)\n for prefix in [ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX]:\n zero_files = get_files_with_prefix(file_list, prefix)\n if len(zero_files) > 0:\n return sort_zero_files(zero_files, prefix)\n\n return []\n\n\ndef partition_data(data_list, num_partitions):\n num_elems = len(data_list)\n assert num_elems % num_partitions == 0\n partition_size = num_elems // num_partitions\n partitions_list = [data_list[i:i + partition_size] for i in range(0, num_elems, partition_size)]\n return partitions_list\n\n\ndef _key_list_to_string(key_list):\n return '.'.join(key_list)\n\n\ndef merge_state_dict(dict_a, dict_b, key_list):\n merged_dict = type(dict_a)({})\n\n for key, value in dict_b.items():\n if key in dict_a.keys():\n merged_dict[key] = merge_state(dict_a[key], dict_b[key], [str(key)])\n else:\n merged_dict[key] = value\n\n return merged_dict\n\n\ndef merge_state_list(list_a, list_b, key_list):\n if len(list_a) != len(list_b):\n print(f'{_key_list_to_string(key_list)}')\n raise ValueError(f'Cannot merge lists of different lengths, a = {len(list_a)} b = {len(list_b)}')\n\n return [merge_state(a, b, key_list) for a, b in zip(list_a, list_b)]\n\n\ndef merge_state(state_a, state_b, key_list=[]):\n if type(state_a) != type(state_b):\n key_list_string = _key_list_to_string(key_list)\n print(f'key_list = {key_list_string}')\n raise ValueError(f'Cannot merge two states of types {type(state_a)} and type {type(state_b)}')\n\n if type(state_a) in (dict, OrderedDict):\n return merge_state_dict(state_a, state_b, key_list)\n elif type(state_a) in (list, tuple):\n return type(state_a)(merge_state_list(state_a, state_b, key_list))\n elif torch.is_tensor(state_a):\n return torch.cat([state_a, state_b], 0)\n else:\n return state_a\n", "path": "deepspeed/checkpoint/reshape_utils.py"}]}
2,218
397
gh_patches_debug_37332
rasdani/github-patches
git_diff
cobbler__cobbler-2545
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Deleting a system (or any object) may not persist across cobbler restarts Problem is seen in ``` Cobbler 3.1.2 source: ?, ? build time: Sun May 31 02:32:34 2020 ``` This bug was actually fixed via commit 63d8312bac0bc4a1567d7e511beddfa89f4e1562 in December 2020. File modules/serializers/file.py functions serialize_item() and serialize_delete() were appending the string ".json" to the filename twice. Rather than trying to delete file "object.json", serialize_delete() would try to delete "object.json.json" which didn't exist, and so the code did nothing. After a cobbler restart, the "object.json" file would be seen and the object re-appeared in cobbler. In this case the object was created before the last cobbler upgrade, so it has the "object.json" filename format, it didn't have the ".json.json" suffix. Objects created after the last cobbler upgrade all have the ".json.json" suffix, there is no "object.json" file. The duplication was removed, but this leaves existing sites with a mess in their collections subdirectories. Depending upon when they created objects relative to when they last upgraded cobbler, they can have files with name "object.json", "object.json.json", or both files can exist simultaneously. Seems like there should be some code added to detect this situation, and rename the "object.json.json" files back to their appropriate "object.json" names. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cobbler/modules/serializers/file.py` Content: ``` 1 """ 2 Cobbler's file-based object serializer. 3 As of 9/2014, this is Cobbler's default serializer and the most stable one. 4 It uses multiple JSON files in /var/lib/cobbler/collections/distros, profiles, etc 5 6 Copyright 2006-2009, Red Hat, Inc and Others 7 Michael DeHaan <michael.dehaan AT gmail> 8 9 This program is free software; you can redistribute it and/or modify 10 it under the terms of the GNU General Public License as published by 11 the Free Software Foundation; either version 2 of the License, or 12 (at your option) any later version. 13 14 This program is distributed in the hope that it will be useful, 15 but WITHOUT ANY WARRANTY; without even the implied warranty of 16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 GNU General Public License for more details. 18 19 You should have received a copy of the GNU General Public License 20 along with this program; if not, write to the Free Software 21 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 22 02110-1301 USA 23 """ 24 25 import os 26 import glob 27 import simplejson 28 29 import cobbler.api as capi 30 from cobbler import settings 31 from cobbler.cexceptions import CX 32 33 libpath = "/var/lib/cobbler/collections" 34 35 36 def register(): 37 """ 38 The mandatory Cobbler module registration hook. 39 """ 40 return "serializer" 41 42 43 def what(): 44 """ 45 Module identification function 46 """ 47 return "serializer/file" 48 49 50 def serialize_item(collection, item): 51 """ 52 Save a collection item to file system 53 54 :param collection: collection 55 :param item: collection item 56 """ 57 58 if not item.name: 59 raise CX("name unset for item!") 60 61 collection_types = collection.collection_types() 62 filename = os.path.join(libpath, collection_types, item.name + ".json") 63 64 _dict = item.to_dict() 65 66 if capi.CobblerAPI().settings().serializer_pretty_json: 67 sort_keys = True 68 indent = 4 69 else: 70 sort_keys = False 71 indent = None 72 73 _dict = item.to_dict() 74 with open(filename, "w+") as fd: 75 data = simplejson.dumps(_dict, encoding="utf-8", sort_keys=sort_keys, indent=indent) 76 fd.write(data) 77 78 79 def serialize_delete(collection, item): 80 """ 81 Delete a collection item from file system. 82 83 :param collection: collection 84 :param item: collection item 85 """ 86 87 collection_types = collection.collection_types() 88 filename = os.path.join(libpath, collection_types, item.name + ".json") 89 90 if os.path.exists(filename): 91 os.remove(filename) 92 93 94 def serialize(collection): 95 """ 96 Save a collection to file system 97 98 :param collection: collection 99 """ 100 101 # do not serialize settings 102 ctype = collection.collection_type() 103 if ctype != "settings": 104 for x in collection: 105 serialize_item(collection, x) 106 107 108 def deserialize_raw(collection_types): 109 """ 110 Loads a collection from the disk. 111 112 :param collection_types: The type of collection to load. 113 :return: The loaded dictionary. 114 """ 115 if collection_types == "settings": 116 return settings.read_settings_file() 117 else: 118 results = [] 119 120 path = os.path.join(libpath, collection_types) 121 all_files = glob.glob("%s/*.json" % path) 122 123 for f in all_files: 124 with open(f) as fd: 125 json_data = fd.read() 126 _dict = simplejson.loads(json_data, encoding='utf-8') 127 results.append(_dict) 128 return results 129 130 131 def filter_upgrade_duplicates(file_list): 132 """ 133 In a set of files, some ending with .json, some not, return the list of files with the .json ones taking priority 134 over the ones that are not. 135 136 :param file_list: The list of files to remove duplicates from. 137 :return: The filtered list of files. Normally this should only return ``.json``-Files. 138 """ 139 bases = {} 140 for f in file_list: 141 basekey = f.replace(".json", "") 142 if f.endswith(".json"): 143 bases[basekey] = f 144 else: 145 lookup = bases.get(basekey, "") 146 if not lookup.endswith(".json"): 147 bases[basekey] = f 148 return list(bases.values()) 149 150 151 def deserialize(collection, topological=True): 152 """ 153 Load a collection from file system. 154 155 :param collection: The collection to deserialize. 156 :param topological: If the collection list should be sorted by the 157 collection dict depth value or not. 158 :type topological: bool 159 """ 160 161 datastruct = deserialize_raw(collection.collection_types()) 162 if topological and type(datastruct) == list: 163 datastruct.sort(key=lambda x: x["depth"]) 164 if type(datastruct) == dict: 165 collection.from_dict(datastruct) 166 elif type(datastruct) == list: 167 collection.from_list(datastruct) 168 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cobbler/modules/serializers/file.py b/cobbler/modules/serializers/file.py --- a/cobbler/modules/serializers/file.py +++ b/cobbler/modules/serializers/file.py @@ -33,20 +33,35 @@ libpath = "/var/lib/cobbler/collections" -def register(): +def register() -> str: """ The mandatory Cobbler module registration hook. """ return "serializer" -def what(): +def what() -> str: """ Module identification function """ return "serializer/file" +def __find_double_json_files(filename: str): + """ + Finds a file with duplicate .json ending and renames it. + :param filename: Filename to be checked + :raises FileExistsError: If both JSON files exist + """ + + if not os.path.isfile(filename): + if os.path.isfile(filename + ".json"): + os.rename(filename + ".json", filename) + else: + if os.path.isfile(filename + ".json"): + raise FileExistsError("Both JSON files (%s) exist!" % filename) + + def serialize_item(collection, item): """ Save a collection item to file system @@ -60,6 +75,7 @@ collection_types = collection.collection_types() filename = os.path.join(libpath, collection_types, item.name + ".json") + __find_double_json_files(filename) _dict = item.to_dict() @@ -86,6 +102,7 @@ collection_types = collection.collection_types() filename = os.path.join(libpath, collection_types, item.name + ".json") + __find_double_json_files(filename) if os.path.exists(filename): os.remove(filename) @@ -128,26 +145,6 @@ return results -def filter_upgrade_duplicates(file_list): - """ - In a set of files, some ending with .json, some not, return the list of files with the .json ones taking priority - over the ones that are not. - - :param file_list: The list of files to remove duplicates from. - :return: The filtered list of files. Normally this should only return ``.json``-Files. - """ - bases = {} - for f in file_list: - basekey = f.replace(".json", "") - if f.endswith(".json"): - bases[basekey] = f - else: - lookup = bases.get(basekey, "") - if not lookup.endswith(".json"): - bases[basekey] = f - return list(bases.values()) - - def deserialize(collection, topological=True): """ Load a collection from file system.
{"golden_diff": "diff --git a/cobbler/modules/serializers/file.py b/cobbler/modules/serializers/file.py\n--- a/cobbler/modules/serializers/file.py\n+++ b/cobbler/modules/serializers/file.py\n@@ -33,20 +33,35 @@\n libpath = \"/var/lib/cobbler/collections\"\n \n \n-def register():\n+def register() -> str:\n \"\"\"\n The mandatory Cobbler module registration hook.\n \"\"\"\n return \"serializer\"\n \n \n-def what():\n+def what() -> str:\n \"\"\"\n Module identification function\n \"\"\"\n return \"serializer/file\"\n \n \n+def __find_double_json_files(filename: str):\n+ \"\"\"\n+ Finds a file with duplicate .json ending and renames it.\n+ :param filename: Filename to be checked\n+ :raises FileExistsError: If both JSON files exist\n+ \"\"\"\n+\n+ if not os.path.isfile(filename):\n+ if os.path.isfile(filename + \".json\"):\n+ os.rename(filename + \".json\", filename)\n+ else:\n+ if os.path.isfile(filename + \".json\"):\n+ raise FileExistsError(\"Both JSON files (%s) exist!\" % filename)\n+\n+\n def serialize_item(collection, item):\n \"\"\"\n Save a collection item to file system\n@@ -60,6 +75,7 @@\n \n collection_types = collection.collection_types()\n filename = os.path.join(libpath, collection_types, item.name + \".json\")\n+ __find_double_json_files(filename)\n \n _dict = item.to_dict()\n \n@@ -86,6 +102,7 @@\n \n collection_types = collection.collection_types()\n filename = os.path.join(libpath, collection_types, item.name + \".json\")\n+ __find_double_json_files(filename)\n \n if os.path.exists(filename):\n os.remove(filename)\n@@ -128,26 +145,6 @@\n return results\n \n \n-def filter_upgrade_duplicates(file_list):\n- \"\"\"\n- In a set of files, some ending with .json, some not, return the list of files with the .json ones taking priority\n- over the ones that are not.\n-\n- :param file_list: The list of files to remove duplicates from.\n- :return: The filtered list of files. Normally this should only return ``.json``-Files.\n- \"\"\"\n- bases = {}\n- for f in file_list:\n- basekey = f.replace(\".json\", \"\")\n- if f.endswith(\".json\"):\n- bases[basekey] = f\n- else:\n- lookup = bases.get(basekey, \"\")\n- if not lookup.endswith(\".json\"):\n- bases[basekey] = f\n- return list(bases.values())\n-\n-\n def deserialize(collection, topological=True):\n \"\"\"\n Load a collection from file system.\n", "issue": "Deleting a system (or any object) may not persist across cobbler restarts\nProblem is seen in \r\n\r\n```\r\nCobbler 3.1.2\r\n source: ?, ?\r\n build time: Sun May 31 02:32:34 2020\r\n```\r\n\r\nThis bug was actually fixed via commit 63d8312bac0bc4a1567d7e511beddfa89f4e1562 in December 2020. File modules/serializers/file.py functions serialize_item() and serialize_delete() were appending the string \".json\" to the filename twice. Rather than trying to delete file \"object.json\", serialize_delete() would try to delete \"object.json.json\" which didn't exist, and so the code did nothing. After a cobbler restart, the \"object.json\" file would be seen and the object re-appeared in cobbler. In this case the object was created before the last cobbler upgrade, so it has the \"object.json\" filename format, it didn't have the \".json.json\" suffix. Objects created after the last cobbler upgrade all have the \".json.json\" suffix, there is no \"object.json\" file.\r\n\r\nThe duplication was removed, but this leaves existing sites with a mess in their collections subdirectories. Depending upon when they created objects relative to when they last upgraded cobbler, they can have files with name \"object.json\", \"object.json.json\", or both files can exist simultaneously.\r\n\r\nSeems like there should be some code added to detect this situation, and rename the \"object.json.json\" files back to their appropriate \"object.json\" names.\r\n\n", "before_files": [{"content": "\"\"\"\nCobbler's file-based object serializer.\nAs of 9/2014, this is Cobbler's default serializer and the most stable one.\nIt uses multiple JSON files in /var/lib/cobbler/collections/distros, profiles, etc\n\nCopyright 2006-2009, Red Hat, Inc and Others\nMichael DeHaan <michael.dehaan AT gmail>\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n02110-1301 USA\n\"\"\"\n\nimport os\nimport glob\nimport simplejson\n\nimport cobbler.api as capi\nfrom cobbler import settings\nfrom cobbler.cexceptions import CX\n\nlibpath = \"/var/lib/cobbler/collections\"\n\n\ndef register():\n \"\"\"\n The mandatory Cobbler module registration hook.\n \"\"\"\n return \"serializer\"\n\n\ndef what():\n \"\"\"\n Module identification function\n \"\"\"\n return \"serializer/file\"\n\n\ndef serialize_item(collection, item):\n \"\"\"\n Save a collection item to file system\n\n :param collection: collection\n :param item: collection item\n \"\"\"\n\n if not item.name:\n raise CX(\"name unset for item!\")\n\n collection_types = collection.collection_types()\n filename = os.path.join(libpath, collection_types, item.name + \".json\")\n\n _dict = item.to_dict()\n\n if capi.CobblerAPI().settings().serializer_pretty_json:\n sort_keys = True\n indent = 4\n else:\n sort_keys = False\n indent = None\n\n _dict = item.to_dict()\n with open(filename, \"w+\") as fd:\n data = simplejson.dumps(_dict, encoding=\"utf-8\", sort_keys=sort_keys, indent=indent)\n fd.write(data)\n\n\ndef serialize_delete(collection, item):\n \"\"\"\n Delete a collection item from file system.\n\n :param collection: collection\n :param item: collection item\n \"\"\"\n\n collection_types = collection.collection_types()\n filename = os.path.join(libpath, collection_types, item.name + \".json\")\n\n if os.path.exists(filename):\n os.remove(filename)\n\n\ndef serialize(collection):\n \"\"\"\n Save a collection to file system\n\n :param collection: collection\n \"\"\"\n\n # do not serialize settings\n ctype = collection.collection_type()\n if ctype != \"settings\":\n for x in collection:\n serialize_item(collection, x)\n\n\ndef deserialize_raw(collection_types):\n \"\"\"\n Loads a collection from the disk.\n\n :param collection_types: The type of collection to load.\n :return: The loaded dictionary.\n \"\"\"\n if collection_types == \"settings\":\n return settings.read_settings_file()\n else:\n results = []\n\n path = os.path.join(libpath, collection_types)\n all_files = glob.glob(\"%s/*.json\" % path)\n\n for f in all_files:\n with open(f) as fd:\n json_data = fd.read()\n _dict = simplejson.loads(json_data, encoding='utf-8')\n results.append(_dict)\n return results\n\n\ndef filter_upgrade_duplicates(file_list):\n \"\"\"\n In a set of files, some ending with .json, some not, return the list of files with the .json ones taking priority\n over the ones that are not.\n\n :param file_list: The list of files to remove duplicates from.\n :return: The filtered list of files. Normally this should only return ``.json``-Files.\n \"\"\"\n bases = {}\n for f in file_list:\n basekey = f.replace(\".json\", \"\")\n if f.endswith(\".json\"):\n bases[basekey] = f\n else:\n lookup = bases.get(basekey, \"\")\n if not lookup.endswith(\".json\"):\n bases[basekey] = f\n return list(bases.values())\n\n\ndef deserialize(collection, topological=True):\n \"\"\"\n Load a collection from file system.\n\n :param collection: The collection to deserialize.\n :param topological: If the collection list should be sorted by the\n collection dict depth value or not.\n :type topological: bool\n \"\"\"\n\n datastruct = deserialize_raw(collection.collection_types())\n if topological and type(datastruct) == list:\n datastruct.sort(key=lambda x: x[\"depth\"])\n if type(datastruct) == dict:\n collection.from_dict(datastruct)\n elif type(datastruct) == list:\n collection.from_list(datastruct)\n", "path": "cobbler/modules/serializers/file.py"}], "after_files": [{"content": "\"\"\"\nCobbler's file-based object serializer.\nAs of 9/2014, this is Cobbler's default serializer and the most stable one.\nIt uses multiple JSON files in /var/lib/cobbler/collections/distros, profiles, etc\n\nCopyright 2006-2009, Red Hat, Inc and Others\nMichael DeHaan <michael.dehaan AT gmail>\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n02110-1301 USA\n\"\"\"\n\nimport os\nimport glob\nimport simplejson\n\nimport cobbler.api as capi\nfrom cobbler import settings\nfrom cobbler.cexceptions import CX\n\nlibpath = \"/var/lib/cobbler/collections\"\n\n\ndef register() -> str:\n \"\"\"\n The mandatory Cobbler module registration hook.\n \"\"\"\n return \"serializer\"\n\n\ndef what() -> str:\n \"\"\"\n Module identification function\n \"\"\"\n return \"serializer/file\"\n\n\ndef __find_double_json_files(filename: str):\n \"\"\"\n Finds a file with duplicate .json ending and renames it.\n :param filename: Filename to be checked\n :raises FileExistsError: If both JSON files exist\n \"\"\"\n\n if not os.path.isfile(filename):\n if os.path.isfile(filename + \".json\"):\n os.rename(filename + \".json\", filename)\n else:\n if os.path.isfile(filename + \".json\"):\n raise FileExistsError(\"Both JSON files (%s) exist!\" % filename)\n\n\ndef serialize_item(collection, item):\n \"\"\"\n Save a collection item to file system\n\n :param collection: collection\n :param item: collection item\n \"\"\"\n\n if not item.name:\n raise CX(\"name unset for item!\")\n\n collection_types = collection.collection_types()\n filename = os.path.join(libpath, collection_types, item.name + \".json\")\n __find_double_json_files(filename)\n\n _dict = item.to_dict()\n\n if capi.CobblerAPI().settings().serializer_pretty_json:\n sort_keys = True\n indent = 4\n else:\n sort_keys = False\n indent = None\n\n _dict = item.to_dict()\n with open(filename, \"w+\") as fd:\n data = simplejson.dumps(_dict, encoding=\"utf-8\", sort_keys=sort_keys, indent=indent)\n fd.write(data)\n\n\ndef serialize_delete(collection, item):\n \"\"\"\n Delete a collection item from file system.\n\n :param collection: collection\n :param item: collection item\n \"\"\"\n\n collection_types = collection.collection_types()\n filename = os.path.join(libpath, collection_types, item.name + \".json\")\n __find_double_json_files(filename)\n\n if os.path.exists(filename):\n os.remove(filename)\n\n\ndef serialize(collection):\n \"\"\"\n Save a collection to file system\n\n :param collection: collection\n \"\"\"\n\n # do not serialize settings\n ctype = collection.collection_type()\n if ctype != \"settings\":\n for x in collection:\n serialize_item(collection, x)\n\n\ndef deserialize_raw(collection_types):\n \"\"\"\n Loads a collection from the disk.\n\n :param collection_types: The type of collection to load.\n :return: The loaded dictionary.\n \"\"\"\n if collection_types == \"settings\":\n return settings.read_settings_file()\n else:\n results = []\n\n path = os.path.join(libpath, collection_types)\n all_files = glob.glob(\"%s/*.json\" % path)\n\n for f in all_files:\n with open(f) as fd:\n json_data = fd.read()\n _dict = simplejson.loads(json_data, encoding='utf-8')\n results.append(_dict)\n return results\n\n\ndef deserialize(collection, topological=True):\n \"\"\"\n Load a collection from file system.\n\n :param collection: The collection to deserialize.\n :param topological: If the collection list should be sorted by the\n collection dict depth value or not.\n :type topological: bool\n \"\"\"\n\n datastruct = deserialize_raw(collection.collection_types())\n if topological and type(datastruct) == list:\n datastruct.sort(key=lambda x: x[\"depth\"])\n if type(datastruct) == dict:\n collection.from_dict(datastruct)\n elif type(datastruct) == list:\n collection.from_list(datastruct)\n", "path": "cobbler/modules/serializers/file.py"}]}
2,118
607
gh_patches_debug_27802
rasdani/github-patches
git_diff
matrix-org__synapse-12018
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Remove unstable flags for MSC3283 #11933 added support for the stable flags in MSC3283, so we should remove the unstable ones in the next release --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `synapse/rest/client/capabilities.py` Content: ``` 1 # Copyright 2019 New Vector 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 import logging 15 from http import HTTPStatus 16 from typing import TYPE_CHECKING, Tuple 17 18 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, MSC3244_CAPABILITIES 19 from synapse.http.server import HttpServer 20 from synapse.http.servlet import RestServlet 21 from synapse.http.site import SynapseRequest 22 from synapse.types import JsonDict 23 24 from ._base import client_patterns 25 26 if TYPE_CHECKING: 27 from synapse.server import HomeServer 28 29 logger = logging.getLogger(__name__) 30 31 32 class CapabilitiesRestServlet(RestServlet): 33 """End point to expose the capabilities of the server.""" 34 35 PATTERNS = client_patterns("/capabilities$") 36 37 def __init__(self, hs: "HomeServer"): 38 super().__init__() 39 self.hs = hs 40 self.config = hs.config 41 self.auth = hs.get_auth() 42 self.auth_handler = hs.get_auth_handler() 43 44 async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: 45 await self.auth.get_user_by_req(request, allow_guest=True) 46 change_password = self.auth_handler.can_change_password() 47 48 response: JsonDict = { 49 "capabilities": { 50 "m.room_versions": { 51 "default": self.config.server.default_room_version.identifier, 52 "available": { 53 v.identifier: v.disposition 54 for v in KNOWN_ROOM_VERSIONS.values() 55 }, 56 }, 57 "m.change_password": {"enabled": change_password}, 58 "m.set_displayname": { 59 "enabled": self.config.registration.enable_set_displayname 60 }, 61 "m.set_avatar_url": { 62 "enabled": self.config.registration.enable_set_avatar_url 63 }, 64 "m.3pid_changes": { 65 "enabled": self.config.registration.enable_3pid_changes 66 }, 67 } 68 } 69 70 if self.config.experimental.msc3244_enabled: 71 response["capabilities"]["m.room_versions"][ 72 "org.matrix.msc3244.room_capabilities" 73 ] = MSC3244_CAPABILITIES 74 75 # Must be removed in later versions. 76 # Is only included for migration. 77 # Also the parts in `synapse/config/experimental.py`. 78 if self.config.experimental.msc3283_enabled: 79 response["capabilities"]["org.matrix.msc3283.set_displayname"] = { 80 "enabled": self.config.registration.enable_set_displayname 81 } 82 response["capabilities"]["org.matrix.msc3283.set_avatar_url"] = { 83 "enabled": self.config.registration.enable_set_avatar_url 84 } 85 response["capabilities"]["org.matrix.msc3283.3pid_changes"] = { 86 "enabled": self.config.registration.enable_3pid_changes 87 } 88 89 if self.config.experimental.msc3440_enabled: 90 response["capabilities"]["io.element.thread"] = {"enabled": True} 91 92 return HTTPStatus.OK, response 93 94 95 def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: 96 CapabilitiesRestServlet(hs).register(http_server) 97 ``` Path: `synapse/config/experimental.py` Content: ``` 1 # Copyright 2021 The Matrix.org Foundation C.I.C. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from synapse.config._base import Config 16 from synapse.types import JsonDict 17 18 19 class ExperimentalConfig(Config): 20 """Config section for enabling experimental features""" 21 22 section = "experimental" 23 24 def read_config(self, config: JsonDict, **kwargs): 25 experimental = config.get("experimental_features") or {} 26 27 # MSC3440 (thread relation) 28 self.msc3440_enabled: bool = experimental.get("msc3440_enabled", False) 29 # MSC3666: including bundled relations in /search. 30 self.msc3666_enabled: bool = experimental.get("msc3666_enabled", False) 31 32 # MSC3026 (busy presence state) 33 self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False) 34 35 # MSC2716 (importing historical messages) 36 self.msc2716_enabled: bool = experimental.get("msc2716_enabled", False) 37 38 # MSC2285 (hidden read receipts) 39 self.msc2285_enabled: bool = experimental.get("msc2285_enabled", False) 40 41 # MSC3244 (room version capabilities) 42 self.msc3244_enabled: bool = experimental.get("msc3244_enabled", True) 43 44 # MSC3283 (set displayname, avatar_url and change 3pid capabilities) 45 self.msc3283_enabled: bool = experimental.get("msc3283_enabled", False) 46 47 # MSC3266 (room summary api) 48 self.msc3266_enabled: bool = experimental.get("msc3266_enabled", False) 49 50 # MSC3030 (Jump to date API endpoint) 51 self.msc3030_enabled: bool = experimental.get("msc3030_enabled", False) 52 53 # The portion of MSC3202 which is related to device masquerading. 54 self.msc3202_device_masquerading_enabled: bool = experimental.get( 55 "msc3202_device_masquerading", False 56 ) 57 58 # MSC2409 (this setting only relates to optionally sending to-device messages). 59 # Presence, typing and read receipt EDUs are already sent to application services that 60 # have opted in to receive them. If enabled, this adds to-device messages to that list. 61 self.msc2409_to_device_messages_enabled: bool = experimental.get( 62 "msc2409_to_device_messages_enabled", False 63 ) 64 65 # MSC3706 (server-side support for partial state in /send_join responses) 66 self.msc3706_enabled: bool = experimental.get("msc3706_enabled", False) 67 68 # experimental support for faster joins over federation (msc2775, msc3706) 69 # requires a target server with msc3706_enabled enabled. 70 self.faster_joins_enabled: bool = experimental.get("faster_joins", False) 71 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -41,9 +41,6 @@ # MSC3244 (room version capabilities) self.msc3244_enabled: bool = experimental.get("msc3244_enabled", True) - # MSC3283 (set displayname, avatar_url and change 3pid capabilities) - self.msc3283_enabled: bool = experimental.get("msc3283_enabled", False) - # MSC3266 (room summary api) self.msc3266_enabled: bool = experimental.get("msc3266_enabled", False) diff --git a/synapse/rest/client/capabilities.py b/synapse/rest/client/capabilities.py --- a/synapse/rest/client/capabilities.py +++ b/synapse/rest/client/capabilities.py @@ -72,20 +72,6 @@ "org.matrix.msc3244.room_capabilities" ] = MSC3244_CAPABILITIES - # Must be removed in later versions. - # Is only included for migration. - # Also the parts in `synapse/config/experimental.py`. - if self.config.experimental.msc3283_enabled: - response["capabilities"]["org.matrix.msc3283.set_displayname"] = { - "enabled": self.config.registration.enable_set_displayname - } - response["capabilities"]["org.matrix.msc3283.set_avatar_url"] = { - "enabled": self.config.registration.enable_set_avatar_url - } - response["capabilities"]["org.matrix.msc3283.3pid_changes"] = { - "enabled": self.config.registration.enable_3pid_changes - } - if self.config.experimental.msc3440_enabled: response["capabilities"]["io.element.thread"] = {"enabled": True}
{"golden_diff": "diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py\n--- a/synapse/config/experimental.py\n+++ b/synapse/config/experimental.py\n@@ -41,9 +41,6 @@\n # MSC3244 (room version capabilities)\n self.msc3244_enabled: bool = experimental.get(\"msc3244_enabled\", True)\n \n- # MSC3283 (set displayname, avatar_url and change 3pid capabilities)\n- self.msc3283_enabled: bool = experimental.get(\"msc3283_enabled\", False)\n-\n # MSC3266 (room summary api)\n self.msc3266_enabled: bool = experimental.get(\"msc3266_enabled\", False)\n \ndiff --git a/synapse/rest/client/capabilities.py b/synapse/rest/client/capabilities.py\n--- a/synapse/rest/client/capabilities.py\n+++ b/synapse/rest/client/capabilities.py\n@@ -72,20 +72,6 @@\n \"org.matrix.msc3244.room_capabilities\"\n ] = MSC3244_CAPABILITIES\n \n- # Must be removed in later versions.\n- # Is only included for migration.\n- # Also the parts in `synapse/config/experimental.py`.\n- if self.config.experimental.msc3283_enabled:\n- response[\"capabilities\"][\"org.matrix.msc3283.set_displayname\"] = {\n- \"enabled\": self.config.registration.enable_set_displayname\n- }\n- response[\"capabilities\"][\"org.matrix.msc3283.set_avatar_url\"] = {\n- \"enabled\": self.config.registration.enable_set_avatar_url\n- }\n- response[\"capabilities\"][\"org.matrix.msc3283.3pid_changes\"] = {\n- \"enabled\": self.config.registration.enable_3pid_changes\n- }\n-\n if self.config.experimental.msc3440_enabled:\n response[\"capabilities\"][\"io.element.thread\"] = {\"enabled\": True}\n", "issue": "Remove unstable flags for MSC3283\n#11933 added support for the stable flags in MSC3283, so we should remove the unstable ones in the next release\n", "before_files": [{"content": "# Copyright 2019 New Vector\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\nfrom http import HTTPStatus\nfrom typing import TYPE_CHECKING, Tuple\n\nfrom synapse.api.room_versions import KNOWN_ROOM_VERSIONS, MSC3244_CAPABILITIES\nfrom synapse.http.server import HttpServer\nfrom synapse.http.servlet import RestServlet\nfrom synapse.http.site import SynapseRequest\nfrom synapse.types import JsonDict\n\nfrom ._base import client_patterns\n\nif TYPE_CHECKING:\n from synapse.server import HomeServer\n\nlogger = logging.getLogger(__name__)\n\n\nclass CapabilitiesRestServlet(RestServlet):\n \"\"\"End point to expose the capabilities of the server.\"\"\"\n\n PATTERNS = client_patterns(\"/capabilities$\")\n\n def __init__(self, hs: \"HomeServer\"):\n super().__init__()\n self.hs = hs\n self.config = hs.config\n self.auth = hs.get_auth()\n self.auth_handler = hs.get_auth_handler()\n\n async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:\n await self.auth.get_user_by_req(request, allow_guest=True)\n change_password = self.auth_handler.can_change_password()\n\n response: JsonDict = {\n \"capabilities\": {\n \"m.room_versions\": {\n \"default\": self.config.server.default_room_version.identifier,\n \"available\": {\n v.identifier: v.disposition\n for v in KNOWN_ROOM_VERSIONS.values()\n },\n },\n \"m.change_password\": {\"enabled\": change_password},\n \"m.set_displayname\": {\n \"enabled\": self.config.registration.enable_set_displayname\n },\n \"m.set_avatar_url\": {\n \"enabled\": self.config.registration.enable_set_avatar_url\n },\n \"m.3pid_changes\": {\n \"enabled\": self.config.registration.enable_3pid_changes\n },\n }\n }\n\n if self.config.experimental.msc3244_enabled:\n response[\"capabilities\"][\"m.room_versions\"][\n \"org.matrix.msc3244.room_capabilities\"\n ] = MSC3244_CAPABILITIES\n\n # Must be removed in later versions.\n # Is only included for migration.\n # Also the parts in `synapse/config/experimental.py`.\n if self.config.experimental.msc3283_enabled:\n response[\"capabilities\"][\"org.matrix.msc3283.set_displayname\"] = {\n \"enabled\": self.config.registration.enable_set_displayname\n }\n response[\"capabilities\"][\"org.matrix.msc3283.set_avatar_url\"] = {\n \"enabled\": self.config.registration.enable_set_avatar_url\n }\n response[\"capabilities\"][\"org.matrix.msc3283.3pid_changes\"] = {\n \"enabled\": self.config.registration.enable_3pid_changes\n }\n\n if self.config.experimental.msc3440_enabled:\n response[\"capabilities\"][\"io.element.thread\"] = {\"enabled\": True}\n\n return HTTPStatus.OK, response\n\n\ndef register_servlets(hs: \"HomeServer\", http_server: HttpServer) -> None:\n CapabilitiesRestServlet(hs).register(http_server)\n", "path": "synapse/rest/client/capabilities.py"}, {"content": "# Copyright 2021 The Matrix.org Foundation C.I.C.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom synapse.config._base import Config\nfrom synapse.types import JsonDict\n\n\nclass ExperimentalConfig(Config):\n \"\"\"Config section for enabling experimental features\"\"\"\n\n section = \"experimental\"\n\n def read_config(self, config: JsonDict, **kwargs):\n experimental = config.get(\"experimental_features\") or {}\n\n # MSC3440 (thread relation)\n self.msc3440_enabled: bool = experimental.get(\"msc3440_enabled\", False)\n # MSC3666: including bundled relations in /search.\n self.msc3666_enabled: bool = experimental.get(\"msc3666_enabled\", False)\n\n # MSC3026 (busy presence state)\n self.msc3026_enabled: bool = experimental.get(\"msc3026_enabled\", False)\n\n # MSC2716 (importing historical messages)\n self.msc2716_enabled: bool = experimental.get(\"msc2716_enabled\", False)\n\n # MSC2285 (hidden read receipts)\n self.msc2285_enabled: bool = experimental.get(\"msc2285_enabled\", False)\n\n # MSC3244 (room version capabilities)\n self.msc3244_enabled: bool = experimental.get(\"msc3244_enabled\", True)\n\n # MSC3283 (set displayname, avatar_url and change 3pid capabilities)\n self.msc3283_enabled: bool = experimental.get(\"msc3283_enabled\", False)\n\n # MSC3266 (room summary api)\n self.msc3266_enabled: bool = experimental.get(\"msc3266_enabled\", False)\n\n # MSC3030 (Jump to date API endpoint)\n self.msc3030_enabled: bool = experimental.get(\"msc3030_enabled\", False)\n\n # The portion of MSC3202 which is related to device masquerading.\n self.msc3202_device_masquerading_enabled: bool = experimental.get(\n \"msc3202_device_masquerading\", False\n )\n\n # MSC2409 (this setting only relates to optionally sending to-device messages).\n # Presence, typing and read receipt EDUs are already sent to application services that\n # have opted in to receive them. If enabled, this adds to-device messages to that list.\n self.msc2409_to_device_messages_enabled: bool = experimental.get(\n \"msc2409_to_device_messages_enabled\", False\n )\n\n # MSC3706 (server-side support for partial state in /send_join responses)\n self.msc3706_enabled: bool = experimental.get(\"msc3706_enabled\", False)\n\n # experimental support for faster joins over federation (msc2775, msc3706)\n # requires a target server with msc3706_enabled enabled.\n self.faster_joins_enabled: bool = experimental.get(\"faster_joins\", False)\n", "path": "synapse/config/experimental.py"}], "after_files": [{"content": "# Copyright 2019 New Vector\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\nfrom http import HTTPStatus\nfrom typing import TYPE_CHECKING, Tuple\n\nfrom synapse.api.room_versions import KNOWN_ROOM_VERSIONS, MSC3244_CAPABILITIES\nfrom synapse.http.server import HttpServer\nfrom synapse.http.servlet import RestServlet\nfrom synapse.http.site import SynapseRequest\nfrom synapse.types import JsonDict\n\nfrom ._base import client_patterns\n\nif TYPE_CHECKING:\n from synapse.server import HomeServer\n\nlogger = logging.getLogger(__name__)\n\n\nclass CapabilitiesRestServlet(RestServlet):\n \"\"\"End point to expose the capabilities of the server.\"\"\"\n\n PATTERNS = client_patterns(\"/capabilities$\")\n\n def __init__(self, hs: \"HomeServer\"):\n super().__init__()\n self.hs = hs\n self.config = hs.config\n self.auth = hs.get_auth()\n self.auth_handler = hs.get_auth_handler()\n\n async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:\n await self.auth.get_user_by_req(request, allow_guest=True)\n change_password = self.auth_handler.can_change_password()\n\n response: JsonDict = {\n \"capabilities\": {\n \"m.room_versions\": {\n \"default\": self.config.server.default_room_version.identifier,\n \"available\": {\n v.identifier: v.disposition\n for v in KNOWN_ROOM_VERSIONS.values()\n },\n },\n \"m.change_password\": {\"enabled\": change_password},\n \"m.set_displayname\": {\n \"enabled\": self.config.registration.enable_set_displayname\n },\n \"m.set_avatar_url\": {\n \"enabled\": self.config.registration.enable_set_avatar_url\n },\n \"m.3pid_changes\": {\n \"enabled\": self.config.registration.enable_3pid_changes\n },\n }\n }\n\n if self.config.experimental.msc3244_enabled:\n response[\"capabilities\"][\"m.room_versions\"][\n \"org.matrix.msc3244.room_capabilities\"\n ] = MSC3244_CAPABILITIES\n\n if self.config.experimental.msc3440_enabled:\n response[\"capabilities\"][\"io.element.thread\"] = {\"enabled\": True}\n\n return HTTPStatus.OK, response\n\n\ndef register_servlets(hs: \"HomeServer\", http_server: HttpServer) -> None:\n CapabilitiesRestServlet(hs).register(http_server)\n", "path": "synapse/rest/client/capabilities.py"}, {"content": "# Copyright 2021 The Matrix.org Foundation C.I.C.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom synapse.config._base import Config\nfrom synapse.types import JsonDict\n\n\nclass ExperimentalConfig(Config):\n \"\"\"Config section for enabling experimental features\"\"\"\n\n section = \"experimental\"\n\n def read_config(self, config: JsonDict, **kwargs):\n experimental = config.get(\"experimental_features\") or {}\n\n # MSC3440 (thread relation)\n self.msc3440_enabled: bool = experimental.get(\"msc3440_enabled\", False)\n # MSC3666: including bundled relations in /search.\n self.msc3666_enabled: bool = experimental.get(\"msc3666_enabled\", False)\n\n # MSC3026 (busy presence state)\n self.msc3026_enabled: bool = experimental.get(\"msc3026_enabled\", False)\n\n # MSC2716 (importing historical messages)\n self.msc2716_enabled: bool = experimental.get(\"msc2716_enabled\", False)\n\n # MSC2285 (hidden read receipts)\n self.msc2285_enabled: bool = experimental.get(\"msc2285_enabled\", False)\n\n # MSC3244 (room version capabilities)\n self.msc3244_enabled: bool = experimental.get(\"msc3244_enabled\", True)\n\n # MSC3266 (room summary api)\n self.msc3266_enabled: bool = experimental.get(\"msc3266_enabled\", False)\n\n # MSC3030 (Jump to date API endpoint)\n self.msc3030_enabled: bool = experimental.get(\"msc3030_enabled\", False)\n\n # The portion of MSC3202 which is related to device masquerading.\n self.msc3202_device_masquerading_enabled: bool = experimental.get(\n \"msc3202_device_masquerading\", False\n )\n\n # MSC2409 (this setting only relates to optionally sending to-device messages).\n # Presence, typing and read receipt EDUs are already sent to application services that\n # have opted in to receive them. If enabled, this adds to-device messages to that list.\n self.msc2409_to_device_messages_enabled: bool = experimental.get(\n \"msc2409_to_device_messages_enabled\", False\n )\n\n # MSC3706 (server-side support for partial state in /send_join responses)\n self.msc3706_enabled: bool = experimental.get(\"msc3706_enabled\", False)\n\n # experimental support for faster joins over federation (msc2775, msc3706)\n # requires a target server with msc3706_enabled enabled.\n self.faster_joins_enabled: bool = experimental.get(\"faster_joins\", False)\n", "path": "synapse/config/experimental.py"}]}
2,232
450
gh_patches_debug_31431
rasdani/github-patches
git_diff
CTFd__CTFd-1581
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Hidden scores, hides graphs for admins as well Hidden scores, hides graphs for admins as well. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `CTFd/utils/decorators/visibility.py` Content: ``` 1 import functools 2 3 from flask import abort, redirect, render_template, request, url_for 4 5 from CTFd.constants.config import ( 6 AccountVisibilityTypes, 7 ChallengeVisibilityTypes, 8 ConfigTypes, 9 RegistrationVisibilityTypes, 10 ScoreVisibilityTypes, 11 ) 12 from CTFd.utils import get_config 13 from CTFd.utils.user import authed, is_admin 14 15 16 def check_score_visibility(f): 17 @functools.wraps(f) 18 def _check_score_visibility(*args, **kwargs): 19 v = get_config(ConfigTypes.SCORE_VISIBILITY) 20 if v == ScoreVisibilityTypes.PUBLIC: 21 return f(*args, **kwargs) 22 23 elif v == ScoreVisibilityTypes.PRIVATE: 24 if authed(): 25 return f(*args, **kwargs) 26 else: 27 if request.content_type == "application/json": 28 abort(403) 29 else: 30 return redirect(url_for("auth.login", next=request.full_path)) 31 32 elif v == ScoreVisibilityTypes.HIDDEN: 33 return ( 34 render_template("errors/403.html", error="Scores are currently hidden"), 35 403, 36 ) 37 38 elif v == ScoreVisibilityTypes.ADMINS: 39 if is_admin(): 40 return f(*args, **kwargs) 41 else: 42 abort(404) 43 44 return _check_score_visibility 45 46 47 def check_challenge_visibility(f): 48 @functools.wraps(f) 49 def _check_challenge_visibility(*args, **kwargs): 50 v = get_config(ConfigTypes.CHALLENGE_VISIBILITY) 51 if v == ChallengeVisibilityTypes.PUBLIC: 52 return f(*args, **kwargs) 53 54 elif v == ChallengeVisibilityTypes.PRIVATE: 55 if authed(): 56 return f(*args, **kwargs) 57 else: 58 if request.content_type == "application/json": 59 abort(403) 60 else: 61 return redirect(url_for("auth.login", next=request.full_path)) 62 63 elif v == ChallengeVisibilityTypes.ADMINS: 64 if is_admin(): 65 return f(*args, **kwargs) 66 else: 67 if authed(): 68 abort(403) 69 else: 70 return redirect(url_for("auth.login", next=request.full_path)) 71 72 return _check_challenge_visibility 73 74 75 def check_account_visibility(f): 76 @functools.wraps(f) 77 def _check_account_visibility(*args, **kwargs): 78 v = get_config(ConfigTypes.ACCOUNT_VISIBILITY) 79 if v == AccountVisibilityTypes.PUBLIC: 80 return f(*args, **kwargs) 81 82 elif v == AccountVisibilityTypes.PRIVATE: 83 if authed(): 84 return f(*args, **kwargs) 85 else: 86 if request.content_type == "application/json": 87 abort(403) 88 else: 89 return redirect(url_for("auth.login", next=request.full_path)) 90 91 elif v == AccountVisibilityTypes.ADMINS: 92 if is_admin(): 93 return f(*args, **kwargs) 94 else: 95 abort(404) 96 97 return _check_account_visibility 98 99 100 def check_registration_visibility(f): 101 @functools.wraps(f) 102 def _check_registration_visibility(*args, **kwargs): 103 v = get_config(ConfigTypes.REGISTRATION_VISIBILITY) 104 if v == RegistrationVisibilityTypes.PUBLIC: 105 return f(*args, **kwargs) 106 elif v == RegistrationVisibilityTypes.PRIVATE: 107 abort(404) 108 109 return _check_registration_visibility 110 ``` Path: `CTFd/scoreboard.py` Content: ``` 1 from flask import Blueprint, render_template 2 3 from CTFd.cache import cache, make_cache_key 4 from CTFd.utils import config 5 from CTFd.utils.decorators.visibility import check_score_visibility 6 from CTFd.utils.helpers import get_infos 7 from CTFd.utils.scores import get_standings 8 9 scoreboard = Blueprint("scoreboard", __name__) 10 11 12 @scoreboard.route("/scoreboard") 13 @check_score_visibility 14 @cache.cached(timeout=60, key_prefix=make_cache_key) 15 def listing(): 16 infos = get_infos() 17 18 if config.is_scoreboard_frozen(): 19 infos.append("Scoreboard has been frozen") 20 21 standings = get_standings() 22 return render_template("scoreboard.html", standings=standings, infos=infos) 23 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/CTFd/scoreboard.py b/CTFd/scoreboard.py --- a/CTFd/scoreboard.py +++ b/CTFd/scoreboard.py @@ -2,9 +2,11 @@ from CTFd.cache import cache, make_cache_key from CTFd.utils import config +from CTFd.utils.config.visibility import scores_visible from CTFd.utils.decorators.visibility import check_score_visibility from CTFd.utils.helpers import get_infos from CTFd.utils.scores import get_standings +from CTFd.utils.user import is_admin scoreboard = Blueprint("scoreboard", __name__) @@ -18,5 +20,8 @@ if config.is_scoreboard_frozen(): infos.append("Scoreboard has been frozen") + if is_admin() is True and scores_visible() is False: + infos.append("Scores are not currently visible to users") + standings = get_standings() return render_template("scoreboard.html", standings=standings, infos=infos) diff --git a/CTFd/utils/decorators/visibility.py b/CTFd/utils/decorators/visibility.py --- a/CTFd/utils/decorators/visibility.py +++ b/CTFd/utils/decorators/visibility.py @@ -30,10 +30,18 @@ return redirect(url_for("auth.login", next=request.full_path)) elif v == ScoreVisibilityTypes.HIDDEN: - return ( - render_template("errors/403.html", error="Scores are currently hidden"), - 403, - ) + if is_admin(): + return f(*args, **kwargs) + else: + if request.content_type == "application/json": + abort(403) + else: + return ( + render_template( + "errors/403.html", error="Scores are currently hidden" + ), + 403, + ) elif v == ScoreVisibilityTypes.ADMINS: if is_admin():
{"golden_diff": "diff --git a/CTFd/scoreboard.py b/CTFd/scoreboard.py\n--- a/CTFd/scoreboard.py\n+++ b/CTFd/scoreboard.py\n@@ -2,9 +2,11 @@\n \n from CTFd.cache import cache, make_cache_key\n from CTFd.utils import config\n+from CTFd.utils.config.visibility import scores_visible\n from CTFd.utils.decorators.visibility import check_score_visibility\n from CTFd.utils.helpers import get_infos\n from CTFd.utils.scores import get_standings\n+from CTFd.utils.user import is_admin\n \n scoreboard = Blueprint(\"scoreboard\", __name__)\n \n@@ -18,5 +20,8 @@\n if config.is_scoreboard_frozen():\n infos.append(\"Scoreboard has been frozen\")\n \n+ if is_admin() is True and scores_visible() is False:\n+ infos.append(\"Scores are not currently visible to users\")\n+\n standings = get_standings()\n return render_template(\"scoreboard.html\", standings=standings, infos=infos)\ndiff --git a/CTFd/utils/decorators/visibility.py b/CTFd/utils/decorators/visibility.py\n--- a/CTFd/utils/decorators/visibility.py\n+++ b/CTFd/utils/decorators/visibility.py\n@@ -30,10 +30,18 @@\n return redirect(url_for(\"auth.login\", next=request.full_path))\n \n elif v == ScoreVisibilityTypes.HIDDEN:\n- return (\n- render_template(\"errors/403.html\", error=\"Scores are currently hidden\"),\n- 403,\n- )\n+ if is_admin():\n+ return f(*args, **kwargs)\n+ else:\n+ if request.content_type == \"application/json\":\n+ abort(403)\n+ else:\n+ return (\n+ render_template(\n+ \"errors/403.html\", error=\"Scores are currently hidden\"\n+ ),\n+ 403,\n+ )\n \n elif v == ScoreVisibilityTypes.ADMINS:\n if is_admin():\n", "issue": "Hidden scores, hides graphs for admins as well\nHidden scores, hides graphs for admins as well. \n", "before_files": [{"content": "import functools\n\nfrom flask import abort, redirect, render_template, request, url_for\n\nfrom CTFd.constants.config import (\n AccountVisibilityTypes,\n ChallengeVisibilityTypes,\n ConfigTypes,\n RegistrationVisibilityTypes,\n ScoreVisibilityTypes,\n)\nfrom CTFd.utils import get_config\nfrom CTFd.utils.user import authed, is_admin\n\n\ndef check_score_visibility(f):\n @functools.wraps(f)\n def _check_score_visibility(*args, **kwargs):\n v = get_config(ConfigTypes.SCORE_VISIBILITY)\n if v == ScoreVisibilityTypes.PUBLIC:\n return f(*args, **kwargs)\n\n elif v == ScoreVisibilityTypes.PRIVATE:\n if authed():\n return f(*args, **kwargs)\n else:\n if request.content_type == \"application/json\":\n abort(403)\n else:\n return redirect(url_for(\"auth.login\", next=request.full_path))\n\n elif v == ScoreVisibilityTypes.HIDDEN:\n return (\n render_template(\"errors/403.html\", error=\"Scores are currently hidden\"),\n 403,\n )\n\n elif v == ScoreVisibilityTypes.ADMINS:\n if is_admin():\n return f(*args, **kwargs)\n else:\n abort(404)\n\n return _check_score_visibility\n\n\ndef check_challenge_visibility(f):\n @functools.wraps(f)\n def _check_challenge_visibility(*args, **kwargs):\n v = get_config(ConfigTypes.CHALLENGE_VISIBILITY)\n if v == ChallengeVisibilityTypes.PUBLIC:\n return f(*args, **kwargs)\n\n elif v == ChallengeVisibilityTypes.PRIVATE:\n if authed():\n return f(*args, **kwargs)\n else:\n if request.content_type == \"application/json\":\n abort(403)\n else:\n return redirect(url_for(\"auth.login\", next=request.full_path))\n\n elif v == ChallengeVisibilityTypes.ADMINS:\n if is_admin():\n return f(*args, **kwargs)\n else:\n if authed():\n abort(403)\n else:\n return redirect(url_for(\"auth.login\", next=request.full_path))\n\n return _check_challenge_visibility\n\n\ndef check_account_visibility(f):\n @functools.wraps(f)\n def _check_account_visibility(*args, **kwargs):\n v = get_config(ConfigTypes.ACCOUNT_VISIBILITY)\n if v == AccountVisibilityTypes.PUBLIC:\n return f(*args, **kwargs)\n\n elif v == AccountVisibilityTypes.PRIVATE:\n if authed():\n return f(*args, **kwargs)\n else:\n if request.content_type == \"application/json\":\n abort(403)\n else:\n return redirect(url_for(\"auth.login\", next=request.full_path))\n\n elif v == AccountVisibilityTypes.ADMINS:\n if is_admin():\n return f(*args, **kwargs)\n else:\n abort(404)\n\n return _check_account_visibility\n\n\ndef check_registration_visibility(f):\n @functools.wraps(f)\n def _check_registration_visibility(*args, **kwargs):\n v = get_config(ConfigTypes.REGISTRATION_VISIBILITY)\n if v == RegistrationVisibilityTypes.PUBLIC:\n return f(*args, **kwargs)\n elif v == RegistrationVisibilityTypes.PRIVATE:\n abort(404)\n\n return _check_registration_visibility\n", "path": "CTFd/utils/decorators/visibility.py"}, {"content": "from flask import Blueprint, render_template\n\nfrom CTFd.cache import cache, make_cache_key\nfrom CTFd.utils import config\nfrom CTFd.utils.decorators.visibility import check_score_visibility\nfrom CTFd.utils.helpers import get_infos\nfrom CTFd.utils.scores import get_standings\n\nscoreboard = Blueprint(\"scoreboard\", __name__)\n\n\[email protected](\"/scoreboard\")\n@check_score_visibility\[email protected](timeout=60, key_prefix=make_cache_key)\ndef listing():\n infos = get_infos()\n\n if config.is_scoreboard_frozen():\n infos.append(\"Scoreboard has been frozen\")\n\n standings = get_standings()\n return render_template(\"scoreboard.html\", standings=standings, infos=infos)\n", "path": "CTFd/scoreboard.py"}], "after_files": [{"content": "import functools\n\nfrom flask import abort, redirect, render_template, request, url_for\n\nfrom CTFd.constants.config import (\n AccountVisibilityTypes,\n ChallengeVisibilityTypes,\n ConfigTypes,\n RegistrationVisibilityTypes,\n ScoreVisibilityTypes,\n)\nfrom CTFd.utils import get_config\nfrom CTFd.utils.user import authed, is_admin\n\n\ndef check_score_visibility(f):\n @functools.wraps(f)\n def _check_score_visibility(*args, **kwargs):\n v = get_config(ConfigTypes.SCORE_VISIBILITY)\n if v == ScoreVisibilityTypes.PUBLIC:\n return f(*args, **kwargs)\n\n elif v == ScoreVisibilityTypes.PRIVATE:\n if authed():\n return f(*args, **kwargs)\n else:\n if request.content_type == \"application/json\":\n abort(403)\n else:\n return redirect(url_for(\"auth.login\", next=request.full_path))\n\n elif v == ScoreVisibilityTypes.HIDDEN:\n if is_admin():\n return f(*args, **kwargs)\n else:\n if request.content_type == \"application/json\":\n abort(403)\n else:\n return (\n render_template(\n \"errors/403.html\", error=\"Scores are currently hidden\"\n ),\n 403,\n )\n\n elif v == ScoreVisibilityTypes.ADMINS:\n if is_admin():\n return f(*args, **kwargs)\n else:\n abort(404)\n\n return _check_score_visibility\n\n\ndef check_challenge_visibility(f):\n @functools.wraps(f)\n def _check_challenge_visibility(*args, **kwargs):\n v = get_config(ConfigTypes.CHALLENGE_VISIBILITY)\n if v == ChallengeVisibilityTypes.PUBLIC:\n return f(*args, **kwargs)\n\n elif v == ChallengeVisibilityTypes.PRIVATE:\n if authed():\n return f(*args, **kwargs)\n else:\n if request.content_type == \"application/json\":\n abort(403)\n else:\n return redirect(url_for(\"auth.login\", next=request.full_path))\n\n elif v == ChallengeVisibilityTypes.ADMINS:\n if is_admin():\n return f(*args, **kwargs)\n else:\n if authed():\n abort(403)\n else:\n return redirect(url_for(\"auth.login\", next=request.full_path))\n\n return _check_challenge_visibility\n\n\ndef check_account_visibility(f):\n @functools.wraps(f)\n def _check_account_visibility(*args, **kwargs):\n v = get_config(ConfigTypes.ACCOUNT_VISIBILITY)\n if v == AccountVisibilityTypes.PUBLIC:\n return f(*args, **kwargs)\n\n elif v == AccountVisibilityTypes.PRIVATE:\n if authed():\n return f(*args, **kwargs)\n else:\n if request.content_type == \"application/json\":\n abort(403)\n else:\n return redirect(url_for(\"auth.login\", next=request.full_path))\n\n elif v == AccountVisibilityTypes.ADMINS:\n if is_admin():\n return f(*args, **kwargs)\n else:\n abort(404)\n\n return _check_account_visibility\n\n\ndef check_registration_visibility(f):\n @functools.wraps(f)\n def _check_registration_visibility(*args, **kwargs):\n v = get_config(ConfigTypes.REGISTRATION_VISIBILITY)\n if v == RegistrationVisibilityTypes.PUBLIC:\n return f(*args, **kwargs)\n elif v == RegistrationVisibilityTypes.PRIVATE:\n abort(404)\n\n return _check_registration_visibility\n", "path": "CTFd/utils/decorators/visibility.py"}, {"content": "from flask import Blueprint, render_template\n\nfrom CTFd.cache import cache, make_cache_key\nfrom CTFd.utils import config\nfrom CTFd.utils.config.visibility import scores_visible\nfrom CTFd.utils.decorators.visibility import check_score_visibility\nfrom CTFd.utils.helpers import get_infos\nfrom CTFd.utils.scores import get_standings\nfrom CTFd.utils.user import is_admin\n\nscoreboard = Blueprint(\"scoreboard\", __name__)\n\n\[email protected](\"/scoreboard\")\n@check_score_visibility\[email protected](timeout=60, key_prefix=make_cache_key)\ndef listing():\n infos = get_infos()\n\n if config.is_scoreboard_frozen():\n infos.append(\"Scoreboard has been frozen\")\n\n if is_admin() is True and scores_visible() is False:\n infos.append(\"Scores are not currently visible to users\")\n\n standings = get_standings()\n return render_template(\"scoreboard.html\", standings=standings, infos=infos)\n", "path": "CTFd/scoreboard.py"}]}
1,444
449
gh_patches_debug_11355
rasdani/github-patches
git_diff
quantumlib__Cirq-2554
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ParamResolver does not have evaluatable repr The reason is that values can be resolved into instances of sympy.Basic. For instance, `repr(cirq.ParamResolver({'a': sympy.Symbol('b') + 1}))` evaluates to `"cirq.ParamResolver({'a': b + 1})"` where the `b` is not a defined expression. This is currently blocking JSON serialization of ParamResolver because the serialization test insists that the repr be evaluatable. Should we drop this requirement for ParamResolver? Or should we actually try to get an evaluatable repr for arbitrary sympy.Basic instances? As @mpharrigan mentioned in #2014 we could maybe use `sympy.srepr`. @Strilanc --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cirq/study/resolver.py` Content: ``` 1 # Copyright 2018 The Cirq Developers 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Resolves ParameterValues to assigned values.""" 16 17 from typing import Dict, Union, TYPE_CHECKING, cast 18 import sympy 19 from cirq._doc import document 20 21 if TYPE_CHECKING: 22 import cirq 23 24 25 ParamDictType = Dict[Union[str, sympy.Symbol], Union[float, str, sympy.Basic]] 26 document( 27 ParamDictType, # type: ignore 28 """Dictionary from symbols to values.""") 29 30 ParamResolverOrSimilarType = Union['cirq.ParamResolver', ParamDictType, None] 31 document( 32 ParamResolverOrSimilarType, # type: ignore 33 """Something that can be used to turn parameters into values.""") 34 35 36 class ParamResolver(object): 37 """Resolves sympy.Symbols to actual values. 38 39 A Symbol is a wrapped parameter name (str). A ParamResolver is an object 40 that can be used to assign values for these keys. 41 42 ParamResolvers are hashable. 43 44 Attributes: 45 param_dict: A dictionary from the ParameterValue key (str) to its 46 assigned value. 47 """ 48 49 def __new__(cls, param_dict: 'cirq.ParamResolverOrSimilarType' = None): 50 if isinstance(param_dict, ParamResolver): 51 return param_dict 52 return super().__new__(cls) 53 54 def __init__(self, 55 param_dict: 'cirq.ParamResolverOrSimilarType' = None) -> None: 56 if hasattr(self, 'param_dict'): 57 return # Already initialized. Got wrapped as part of the __new__. 58 59 self._param_hash = None 60 self.param_dict = cast(ParamDictType, 61 {} if param_dict is None else param_dict) 62 63 def value_of(self, 64 value: Union[sympy.Basic, float, str]) -> 'cirq.TParamVal': 65 """Attempt to resolve a Symbol, string, or float to its assigned value. 66 67 Floats are returned without modification. Strings are resolved via 68 the parameter dictionary with exact match only. Otherwise, strings 69 are considered to be sympy.Symbols with the name as the input string. 70 71 sympy.Symbols are first checked for exact match in the parameter 72 dictionary. Otherwise, the symbol is resolved using sympy substitution. 73 74 Note that passing a formula to this resolver can be slow due to the 75 underlying sympy library. For circuits relying on quick performance, 76 it is recommended that all formulas are flattened before-hand using 77 cirq.flatten or other means so that formula resolution is avoided. 78 If unable to resolve a sympy.Symbol, returns it unchanged. 79 If unable to resolve a name, returns a sympy.Symbol with that name. 80 81 Args: 82 value: The sympy.Symbol or name or float to try to resolve into just 83 a float. 84 85 Returns: 86 The value of the parameter as resolved by this resolver. 87 """ 88 # Input is a float, no resolution needed: return early 89 if isinstance(value, float): 90 return value 91 92 # Handles 2 cases: 93 # Input is a string and maps to a number in the dictionary 94 # Input is a symbol and maps to a number in the dictionary 95 # In both cases, return it directly. 96 if value in self.param_dict: 97 param_value = self.param_dict[value] 98 if isinstance(param_value, (float, int)): 99 return param_value 100 101 # Input is a string and is not in the dictionary. 102 # Treat it as a symbol instead. 103 if isinstance(value, str): 104 # If the string is in the param_dict as a value, return it. 105 # Otherwise, try using the symbol instead. 106 return self.value_of(sympy.Symbol(value)) 107 108 # Input is a symbol (sympy.Symbol('a')) and its string maps to a number 109 # in the dictionary ({'a': 1.0}). Return it. 110 if (isinstance(value, sympy.Symbol) and value.name in self.param_dict): 111 param_value = self.param_dict[value.name] 112 if isinstance(param_value, (float, int)): 113 return param_value 114 115 # Input is either a sympy formula or the dictionary maps to a 116 # formula. Use sympy to resolve the value. 117 # Note that sympy.subs() is slow, so we want to avoid this and 118 # only use it for cases that require complicated resolution. 119 if isinstance(value, sympy.Basic): 120 v = value.subs(self.param_dict) 121 if v.free_symbols: 122 return v 123 elif sympy.im(v): 124 return complex(v) 125 else: 126 return float(v) 127 128 # No known way to resolve this variable, return unchanged. 129 return value 130 131 def __iter__(self): 132 return iter(self.param_dict) 133 134 def __bool__(self): 135 return bool(self.param_dict) 136 137 def __getitem__(self, key): 138 return self.value_of(key) 139 140 def __hash__(self): 141 if self._param_hash is None: 142 self._param_hash = hash(frozenset(self.param_dict.items())) 143 return self._param_hash 144 145 def __eq__(self, other): 146 if not isinstance(other, ParamResolver): 147 return NotImplemented 148 return self.param_dict == other.param_dict 149 150 def __ne__(self, other): 151 return not self == other 152 153 def __repr__(self): 154 return 'cirq.ParamResolver({})'.format(repr(self.param_dict)) 155 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cirq/study/resolver.py b/cirq/study/resolver.py --- a/cirq/study/resolver.py +++ b/cirq/study/resolver.py @@ -16,6 +16,7 @@ from typing import Dict, Union, TYPE_CHECKING, cast import sympy +from cirq._compat import proper_repr from cirq._doc import document if TYPE_CHECKING: @@ -151,4 +152,8 @@ return not self == other def __repr__(self): - return 'cirq.ParamResolver({})'.format(repr(self.param_dict)) + param_dict_repr = ('{' + ', '.join([ + f'{proper_repr(k)}: {proper_repr(v)}' + for k, v in self.param_dict.items() + ]) + '}') + return 'cirq.ParamResolver({})'.format(param_dict_repr)
{"golden_diff": "diff --git a/cirq/study/resolver.py b/cirq/study/resolver.py\n--- a/cirq/study/resolver.py\n+++ b/cirq/study/resolver.py\n@@ -16,6 +16,7 @@\n \n from typing import Dict, Union, TYPE_CHECKING, cast\n import sympy\n+from cirq._compat import proper_repr\n from cirq._doc import document\n \n if TYPE_CHECKING:\n@@ -151,4 +152,8 @@\n return not self == other\n \n def __repr__(self):\n- return 'cirq.ParamResolver({})'.format(repr(self.param_dict))\n+ param_dict_repr = ('{' + ', '.join([\n+ f'{proper_repr(k)}: {proper_repr(v)}'\n+ for k, v in self.param_dict.items()\n+ ]) + '}')\n+ return 'cirq.ParamResolver({})'.format(param_dict_repr)\n", "issue": "ParamResolver does not have evaluatable repr\nThe reason is that values can be resolved into instances of sympy.Basic. For instance, `repr(cirq.ParamResolver({'a': sympy.Symbol('b') + 1}))` evaluates to `\"cirq.ParamResolver({'a': b + 1})\"` where the `b` is not a defined expression.\r\n\r\nThis is currently blocking JSON serialization of ParamResolver because the serialization test insists that the repr be evaluatable. Should we drop this requirement for ParamResolver? Or should we actually try to get an evaluatable repr for arbitrary sympy.Basic instances? As @mpharrigan mentioned in #2014 we could maybe use `sympy.srepr`.\r\n\r\n@Strilanc \n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Resolves ParameterValues to assigned values.\"\"\"\n\nfrom typing import Dict, Union, TYPE_CHECKING, cast\nimport sympy\nfrom cirq._doc import document\n\nif TYPE_CHECKING:\n import cirq\n\n\nParamDictType = Dict[Union[str, sympy.Symbol], Union[float, str, sympy.Basic]]\ndocument(\n ParamDictType, # type: ignore\n \"\"\"Dictionary from symbols to values.\"\"\")\n\nParamResolverOrSimilarType = Union['cirq.ParamResolver', ParamDictType, None]\ndocument(\n ParamResolverOrSimilarType, # type: ignore\n \"\"\"Something that can be used to turn parameters into values.\"\"\")\n\n\nclass ParamResolver(object):\n \"\"\"Resolves sympy.Symbols to actual values.\n\n A Symbol is a wrapped parameter name (str). A ParamResolver is an object\n that can be used to assign values for these keys.\n\n ParamResolvers are hashable.\n\n Attributes:\n param_dict: A dictionary from the ParameterValue key (str) to its\n assigned value.\n \"\"\"\n\n def __new__(cls, param_dict: 'cirq.ParamResolverOrSimilarType' = None):\n if isinstance(param_dict, ParamResolver):\n return param_dict\n return super().__new__(cls)\n\n def __init__(self,\n param_dict: 'cirq.ParamResolverOrSimilarType' = None) -> None:\n if hasattr(self, 'param_dict'):\n return # Already initialized. Got wrapped as part of the __new__.\n\n self._param_hash = None\n self.param_dict = cast(ParamDictType,\n {} if param_dict is None else param_dict)\n\n def value_of(self,\n value: Union[sympy.Basic, float, str]) -> 'cirq.TParamVal':\n \"\"\"Attempt to resolve a Symbol, string, or float to its assigned value.\n\n Floats are returned without modification. Strings are resolved via\n the parameter dictionary with exact match only. Otherwise, strings\n are considered to be sympy.Symbols with the name as the input string.\n\n sympy.Symbols are first checked for exact match in the parameter\n dictionary. Otherwise, the symbol is resolved using sympy substitution.\n\n Note that passing a formula to this resolver can be slow due to the\n underlying sympy library. For circuits relying on quick performance,\n it is recommended that all formulas are flattened before-hand using\n cirq.flatten or other means so that formula resolution is avoided.\n If unable to resolve a sympy.Symbol, returns it unchanged.\n If unable to resolve a name, returns a sympy.Symbol with that name.\n\n Args:\n value: The sympy.Symbol or name or float to try to resolve into just\n a float.\n\n Returns:\n The value of the parameter as resolved by this resolver.\n \"\"\"\n # Input is a float, no resolution needed: return early\n if isinstance(value, float):\n return value\n\n # Handles 2 cases:\n # Input is a string and maps to a number in the dictionary\n # Input is a symbol and maps to a number in the dictionary\n # In both cases, return it directly.\n if value in self.param_dict:\n param_value = self.param_dict[value]\n if isinstance(param_value, (float, int)):\n return param_value\n\n # Input is a string and is not in the dictionary.\n # Treat it as a symbol instead.\n if isinstance(value, str):\n # If the string is in the param_dict as a value, return it.\n # Otherwise, try using the symbol instead.\n return self.value_of(sympy.Symbol(value))\n\n # Input is a symbol (sympy.Symbol('a')) and its string maps to a number\n # in the dictionary ({'a': 1.0}). Return it.\n if (isinstance(value, sympy.Symbol) and value.name in self.param_dict):\n param_value = self.param_dict[value.name]\n if isinstance(param_value, (float, int)):\n return param_value\n\n # Input is either a sympy formula or the dictionary maps to a\n # formula. Use sympy to resolve the value.\n # Note that sympy.subs() is slow, so we want to avoid this and\n # only use it for cases that require complicated resolution.\n if isinstance(value, sympy.Basic):\n v = value.subs(self.param_dict)\n if v.free_symbols:\n return v\n elif sympy.im(v):\n return complex(v)\n else:\n return float(v)\n\n # No known way to resolve this variable, return unchanged.\n return value\n\n def __iter__(self):\n return iter(self.param_dict)\n\n def __bool__(self):\n return bool(self.param_dict)\n\n def __getitem__(self, key):\n return self.value_of(key)\n\n def __hash__(self):\n if self._param_hash is None:\n self._param_hash = hash(frozenset(self.param_dict.items()))\n return self._param_hash\n\n def __eq__(self, other):\n if not isinstance(other, ParamResolver):\n return NotImplemented\n return self.param_dict == other.param_dict\n\n def __ne__(self, other):\n return not self == other\n\n def __repr__(self):\n return 'cirq.ParamResolver({})'.format(repr(self.param_dict))\n", "path": "cirq/study/resolver.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Resolves ParameterValues to assigned values.\"\"\"\n\nfrom typing import Dict, Union, TYPE_CHECKING, cast\nimport sympy\nfrom cirq._compat import proper_repr\nfrom cirq._doc import document\n\nif TYPE_CHECKING:\n import cirq\n\n\nParamDictType = Dict[Union[str, sympy.Symbol], Union[float, str, sympy.Basic]]\ndocument(\n ParamDictType, # type: ignore\n \"\"\"Dictionary from symbols to values.\"\"\")\n\nParamResolverOrSimilarType = Union['cirq.ParamResolver', ParamDictType, None]\ndocument(\n ParamResolverOrSimilarType, # type: ignore\n \"\"\"Something that can be used to turn parameters into values.\"\"\")\n\n\nclass ParamResolver(object):\n \"\"\"Resolves sympy.Symbols to actual values.\n\n A Symbol is a wrapped parameter name (str). A ParamResolver is an object\n that can be used to assign values for these keys.\n\n ParamResolvers are hashable.\n\n Attributes:\n param_dict: A dictionary from the ParameterValue key (str) to its\n assigned value.\n \"\"\"\n\n def __new__(cls, param_dict: 'cirq.ParamResolverOrSimilarType' = None):\n if isinstance(param_dict, ParamResolver):\n return param_dict\n return super().__new__(cls)\n\n def __init__(self,\n param_dict: 'cirq.ParamResolverOrSimilarType' = None) -> None:\n if hasattr(self, 'param_dict'):\n return # Already initialized. Got wrapped as part of the __new__.\n\n self._param_hash = None\n self.param_dict = cast(ParamDictType,\n {} if param_dict is None else param_dict)\n\n def value_of(self,\n value: Union[sympy.Basic, float, str]) -> 'cirq.TParamVal':\n \"\"\"Attempt to resolve a Symbol, string, or float to its assigned value.\n\n Floats are returned without modification. Strings are resolved via\n the parameter dictionary with exact match only. Otherwise, strings\n are considered to be sympy.Symbols with the name as the input string.\n\n sympy.Symbols are first checked for exact match in the parameter\n dictionary. Otherwise, the symbol is resolved using sympy substitution.\n\n Note that passing a formula to this resolver can be slow due to the\n underlying sympy library. For circuits relying on quick performance,\n it is recommended that all formulas are flattened before-hand using\n cirq.flatten or other means so that formula resolution is avoided.\n If unable to resolve a sympy.Symbol, returns it unchanged.\n If unable to resolve a name, returns a sympy.Symbol with that name.\n\n Args:\n value: The sympy.Symbol or name or float to try to resolve into just\n a float.\n\n Returns:\n The value of the parameter as resolved by this resolver.\n \"\"\"\n # Input is a float, no resolution needed: return early\n if isinstance(value, float):\n return value\n\n # Handles 2 cases:\n # Input is a string and maps to a number in the dictionary\n # Input is a symbol and maps to a number in the dictionary\n # In both cases, return it directly.\n if value in self.param_dict:\n param_value = self.param_dict[value]\n if isinstance(param_value, (float, int)):\n return param_value\n\n # Input is a string and is not in the dictionary.\n # Treat it as a symbol instead.\n if isinstance(value, str):\n # If the string is in the param_dict as a value, return it.\n # Otherwise, try using the symbol instead.\n return self.value_of(sympy.Symbol(value))\n\n # Input is a symbol (sympy.Symbol('a')) and its string maps to a number\n # in the dictionary ({'a': 1.0}). Return it.\n if (isinstance(value, sympy.Symbol) and value.name in self.param_dict):\n param_value = self.param_dict[value.name]\n if isinstance(param_value, (float, int)):\n return param_value\n\n # Input is either a sympy formula or the dictionary maps to a\n # formula. Use sympy to resolve the value.\n # Note that sympy.subs() is slow, so we want to avoid this and\n # only use it for cases that require complicated resolution.\n if isinstance(value, sympy.Basic):\n v = value.subs(self.param_dict)\n if v.free_symbols:\n return v\n elif sympy.im(v):\n return complex(v)\n else:\n return float(v)\n\n # No known way to resolve this variable, return unchanged.\n return value\n\n def __iter__(self):\n return iter(self.param_dict)\n\n def __bool__(self):\n return bool(self.param_dict)\n\n def __getitem__(self, key):\n return self.value_of(key)\n\n def __hash__(self):\n if self._param_hash is None:\n self._param_hash = hash(frozenset(self.param_dict.items()))\n return self._param_hash\n\n def __eq__(self, other):\n if not isinstance(other, ParamResolver):\n return NotImplemented\n return self.param_dict == other.param_dict\n\n def __ne__(self, other):\n return not self == other\n\n def __repr__(self):\n param_dict_repr = ('{' + ', '.join([\n f'{proper_repr(k)}: {proper_repr(v)}'\n for k, v in self.param_dict.items()\n ]) + '}')\n return 'cirq.ParamResolver({})'.format(param_dict_repr)\n", "path": "cirq/study/resolver.py"}]}
2,066
201
gh_patches_debug_28399
rasdani/github-patches
git_diff
mirumee__ariadne-1162
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Python 3.12 has deprecation warnings Ariadne produces few warnings when ran on Py3.12. Most of those are out of our hands as they are coming from dependencies, but there are some we should fix: ``` /ariadne/ariadne/contrib/tracing/apollotracing.py:36: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC). self.start_date = datetime.utcnow() /ariadne/ariadne/contrib/tracing/apollotracing.py:96: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC). "end": datetime.utcnow(), ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ariadne/contrib/tracing/apollotracing.py` Content: ``` 1 from datetime import datetime 2 from inspect import iscoroutinefunction 3 from typing import Any, List, Optional, cast 4 5 from graphql import GraphQLResolveInfo 6 from graphql.pyutils import is_awaitable 7 8 from ...types import ContextValue, Extension, Resolver 9 from .utils import format_path, should_trace 10 11 try: 12 from time import perf_counter_ns 13 except ImportError: 14 # Py 3.6 fallback 15 from time import perf_counter 16 17 NS_IN_SECOND = 1000000000 18 19 def perf_counter_ns() -> int: 20 return int(perf_counter() * NS_IN_SECOND) 21 22 23 TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ" 24 25 26 class ApolloTracingExtension(Extension): 27 def __init__(self, trace_default_resolver: bool = False) -> None: 28 self.trace_default_resolver = trace_default_resolver 29 self.start_date: Optional[datetime] = None 30 self.start_timestamp: int = 0 31 self.resolvers: List[dict] = [] 32 33 self._totals = None 34 35 def request_started(self, context: ContextValue): 36 self.start_date = datetime.utcnow() 37 self.start_timestamp = perf_counter_ns() 38 39 def resolve(self, next_: Resolver, obj: Any, info: GraphQLResolveInfo, **kwargs): 40 if not should_trace(info, self.trace_default_resolver): 41 return next_(obj, info, **kwargs) 42 43 if iscoroutinefunction(next_): 44 return self.resolve_async(next_, obj, info, **kwargs) 45 46 return self.resolve_sync(next_, obj, info, **kwargs) 47 48 async def resolve_async( 49 self, next_: Resolver, obj: Any, info: GraphQLResolveInfo, **kwargs 50 ): 51 start_timestamp = perf_counter_ns() 52 record = { 53 "path": format_path(info.path), 54 "parentType": str(info.parent_type), 55 "fieldName": info.field_name, 56 "returnType": str(info.return_type), 57 "startOffset": start_timestamp - cast(int, self.start_timestamp), 58 } 59 self.resolvers.append(record) 60 try: 61 result = next_(obj, info, **kwargs) 62 if is_awaitable(result): 63 result = await result 64 return result 65 finally: 66 end_timestamp = perf_counter_ns() 67 record["duration"] = end_timestamp - start_timestamp 68 69 def resolve_sync( 70 self, next_: Resolver, obj: Any, info: GraphQLResolveInfo, **kwargs 71 ): 72 start_timestamp = perf_counter_ns() 73 record = { 74 "path": format_path(info.path), 75 "parentType": str(info.parent_type), 76 "fieldName": info.field_name, 77 "returnType": str(info.return_type), 78 "startOffset": start_timestamp - cast(int, self.start_timestamp), 79 } 80 self.resolvers.append(record) 81 try: 82 result = next_(obj, info, **kwargs) 83 return result 84 finally: 85 end_timestamp = perf_counter_ns() 86 record["duration"] = end_timestamp - start_timestamp 87 88 def get_totals(self): 89 if self._totals is None: 90 self._totals = self._get_totals() 91 return self._totals 92 93 def _get_totals(self): 94 return { 95 "start": self.start_date, 96 "end": datetime.utcnow(), 97 "duration": perf_counter_ns() - self.start_timestamp, 98 "resolvers": self.resolvers, 99 } 100 101 def format(self, context: ContextValue): 102 totals = self.get_totals() 103 104 return { 105 "tracing": { 106 "version": 1, 107 "startTime": totals["start"].strftime(TIMESTAMP_FORMAT), 108 "endTime": totals["end"].strftime(TIMESTAMP_FORMAT), 109 "duration": totals["duration"], 110 "execution": {"resolvers": totals["resolvers"]}, 111 } 112 } 113 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ariadne/contrib/tracing/apollotracing.py b/ariadne/contrib/tracing/apollotracing.py --- a/ariadne/contrib/tracing/apollotracing.py +++ b/ariadne/contrib/tracing/apollotracing.py @@ -1,4 +1,3 @@ -from datetime import datetime from inspect import iscoroutinefunction from typing import Any, List, Optional, cast @@ -20,6 +19,19 @@ return int(perf_counter() * NS_IN_SECOND) +try: + from datetime import UTC, datetime # type: ignore[attr-defined] + + def utc_now(): + return datetime.now(UTC) + +except ImportError: + from datetime import datetime + + def utc_now(): + return datetime.utcnow() + + TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ" @@ -33,7 +45,7 @@ self._totals = None def request_started(self, context: ContextValue): - self.start_date = datetime.utcnow() + self.start_date = utc_now() self.start_timestamp = perf_counter_ns() def resolve(self, next_: Resolver, obj: Any, info: GraphQLResolveInfo, **kwargs): @@ -93,7 +105,7 @@ def _get_totals(self): return { "start": self.start_date, - "end": datetime.utcnow(), + "end": utc_now(), "duration": perf_counter_ns() - self.start_timestamp, "resolvers": self.resolvers, }
{"golden_diff": "diff --git a/ariadne/contrib/tracing/apollotracing.py b/ariadne/contrib/tracing/apollotracing.py\n--- a/ariadne/contrib/tracing/apollotracing.py\n+++ b/ariadne/contrib/tracing/apollotracing.py\n@@ -1,4 +1,3 @@\n-from datetime import datetime\n from inspect import iscoroutinefunction\n from typing import Any, List, Optional, cast\n \n@@ -20,6 +19,19 @@\n return int(perf_counter() * NS_IN_SECOND)\n \n \n+try:\n+ from datetime import UTC, datetime # type: ignore[attr-defined]\n+\n+ def utc_now():\n+ return datetime.now(UTC)\n+\n+except ImportError:\n+ from datetime import datetime\n+\n+ def utc_now():\n+ return datetime.utcnow()\n+\n+\n TIMESTAMP_FORMAT = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n \n \n@@ -33,7 +45,7 @@\n self._totals = None\n \n def request_started(self, context: ContextValue):\n- self.start_date = datetime.utcnow()\n+ self.start_date = utc_now()\n self.start_timestamp = perf_counter_ns()\n \n def resolve(self, next_: Resolver, obj: Any, info: GraphQLResolveInfo, **kwargs):\n@@ -93,7 +105,7 @@\n def _get_totals(self):\n return {\n \"start\": self.start_date,\n- \"end\": datetime.utcnow(),\n+ \"end\": utc_now(),\n \"duration\": perf_counter_ns() - self.start_timestamp,\n \"resolvers\": self.resolvers,\n }\n", "issue": "Python 3.12 has deprecation warnings\nAriadne produces few warnings when ran on Py3.12. Most of those are out of our hands as they are coming from dependencies, but there are some we should fix:\r\n\r\n```\r\n\r\n /ariadne/ariadne/contrib/tracing/apollotracing.py:36: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC).\r\n self.start_date = datetime.utcnow()\r\n\r\n /ariadne/ariadne/contrib/tracing/apollotracing.py:96: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC).\r\n \"end\": datetime.utcnow(),\r\n```\n", "before_files": [{"content": "from datetime import datetime\nfrom inspect import iscoroutinefunction\nfrom typing import Any, List, Optional, cast\n\nfrom graphql import GraphQLResolveInfo\nfrom graphql.pyutils import is_awaitable\n\nfrom ...types import ContextValue, Extension, Resolver\nfrom .utils import format_path, should_trace\n\ntry:\n from time import perf_counter_ns\nexcept ImportError:\n # Py 3.6 fallback\n from time import perf_counter\n\n NS_IN_SECOND = 1000000000\n\n def perf_counter_ns() -> int:\n return int(perf_counter() * NS_IN_SECOND)\n\n\nTIMESTAMP_FORMAT = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n\n\nclass ApolloTracingExtension(Extension):\n def __init__(self, trace_default_resolver: bool = False) -> None:\n self.trace_default_resolver = trace_default_resolver\n self.start_date: Optional[datetime] = None\n self.start_timestamp: int = 0\n self.resolvers: List[dict] = []\n\n self._totals = None\n\n def request_started(self, context: ContextValue):\n self.start_date = datetime.utcnow()\n self.start_timestamp = perf_counter_ns()\n\n def resolve(self, next_: Resolver, obj: Any, info: GraphQLResolveInfo, **kwargs):\n if not should_trace(info, self.trace_default_resolver):\n return next_(obj, info, **kwargs)\n\n if iscoroutinefunction(next_):\n return self.resolve_async(next_, obj, info, **kwargs)\n\n return self.resolve_sync(next_, obj, info, **kwargs)\n\n async def resolve_async(\n self, next_: Resolver, obj: Any, info: GraphQLResolveInfo, **kwargs\n ):\n start_timestamp = perf_counter_ns()\n record = {\n \"path\": format_path(info.path),\n \"parentType\": str(info.parent_type),\n \"fieldName\": info.field_name,\n \"returnType\": str(info.return_type),\n \"startOffset\": start_timestamp - cast(int, self.start_timestamp),\n }\n self.resolvers.append(record)\n try:\n result = next_(obj, info, **kwargs)\n if is_awaitable(result):\n result = await result\n return result\n finally:\n end_timestamp = perf_counter_ns()\n record[\"duration\"] = end_timestamp - start_timestamp\n\n def resolve_sync(\n self, next_: Resolver, obj: Any, info: GraphQLResolveInfo, **kwargs\n ):\n start_timestamp = perf_counter_ns()\n record = {\n \"path\": format_path(info.path),\n \"parentType\": str(info.parent_type),\n \"fieldName\": info.field_name,\n \"returnType\": str(info.return_type),\n \"startOffset\": start_timestamp - cast(int, self.start_timestamp),\n }\n self.resolvers.append(record)\n try:\n result = next_(obj, info, **kwargs)\n return result\n finally:\n end_timestamp = perf_counter_ns()\n record[\"duration\"] = end_timestamp - start_timestamp\n\n def get_totals(self):\n if self._totals is None:\n self._totals = self._get_totals()\n return self._totals\n\n def _get_totals(self):\n return {\n \"start\": self.start_date,\n \"end\": datetime.utcnow(),\n \"duration\": perf_counter_ns() - self.start_timestamp,\n \"resolvers\": self.resolvers,\n }\n\n def format(self, context: ContextValue):\n totals = self.get_totals()\n\n return {\n \"tracing\": {\n \"version\": 1,\n \"startTime\": totals[\"start\"].strftime(TIMESTAMP_FORMAT),\n \"endTime\": totals[\"end\"].strftime(TIMESTAMP_FORMAT),\n \"duration\": totals[\"duration\"],\n \"execution\": {\"resolvers\": totals[\"resolvers\"]},\n }\n }\n", "path": "ariadne/contrib/tracing/apollotracing.py"}], "after_files": [{"content": "from inspect import iscoroutinefunction\nfrom typing import Any, List, Optional, cast\n\nfrom graphql import GraphQLResolveInfo\nfrom graphql.pyutils import is_awaitable\n\nfrom ...types import ContextValue, Extension, Resolver\nfrom .utils import format_path, should_trace\n\ntry:\n from time import perf_counter_ns\nexcept ImportError:\n # Py 3.6 fallback\n from time import perf_counter\n\n NS_IN_SECOND = 1000000000\n\n def perf_counter_ns() -> int:\n return int(perf_counter() * NS_IN_SECOND)\n\n\ntry:\n from datetime import UTC, datetime # type: ignore[attr-defined]\n\n def utc_now():\n return datetime.now(UTC)\n\nexcept ImportError:\n from datetime import datetime\n\n def utc_now():\n return datetime.utcnow()\n\n\nTIMESTAMP_FORMAT = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n\n\nclass ApolloTracingExtension(Extension):\n def __init__(self, trace_default_resolver: bool = False) -> None:\n self.trace_default_resolver = trace_default_resolver\n self.start_date: Optional[datetime] = None\n self.start_timestamp: int = 0\n self.resolvers: List[dict] = []\n\n self._totals = None\n\n def request_started(self, context: ContextValue):\n self.start_date = utc_now()\n self.start_timestamp = perf_counter_ns()\n\n def resolve(self, next_: Resolver, obj: Any, info: GraphQLResolveInfo, **kwargs):\n if not should_trace(info, self.trace_default_resolver):\n return next_(obj, info, **kwargs)\n\n if iscoroutinefunction(next_):\n return self.resolve_async(next_, obj, info, **kwargs)\n\n return self.resolve_sync(next_, obj, info, **kwargs)\n\n async def resolve_async(\n self, next_: Resolver, obj: Any, info: GraphQLResolveInfo, **kwargs\n ):\n start_timestamp = perf_counter_ns()\n record = {\n \"path\": format_path(info.path),\n \"parentType\": str(info.parent_type),\n \"fieldName\": info.field_name,\n \"returnType\": str(info.return_type),\n \"startOffset\": start_timestamp - cast(int, self.start_timestamp),\n }\n self.resolvers.append(record)\n try:\n result = next_(obj, info, **kwargs)\n if is_awaitable(result):\n result = await result\n return result\n finally:\n end_timestamp = perf_counter_ns()\n record[\"duration\"] = end_timestamp - start_timestamp\n\n def resolve_sync(\n self, next_: Resolver, obj: Any, info: GraphQLResolveInfo, **kwargs\n ):\n start_timestamp = perf_counter_ns()\n record = {\n \"path\": format_path(info.path),\n \"parentType\": str(info.parent_type),\n \"fieldName\": info.field_name,\n \"returnType\": str(info.return_type),\n \"startOffset\": start_timestamp - cast(int, self.start_timestamp),\n }\n self.resolvers.append(record)\n try:\n result = next_(obj, info, **kwargs)\n return result\n finally:\n end_timestamp = perf_counter_ns()\n record[\"duration\"] = end_timestamp - start_timestamp\n\n def get_totals(self):\n if self._totals is None:\n self._totals = self._get_totals()\n return self._totals\n\n def _get_totals(self):\n return {\n \"start\": self.start_date,\n \"end\": utc_now(),\n \"duration\": perf_counter_ns() - self.start_timestamp,\n \"resolvers\": self.resolvers,\n }\n\n def format(self, context: ContextValue):\n totals = self.get_totals()\n\n return {\n \"tracing\": {\n \"version\": 1,\n \"startTime\": totals[\"start\"].strftime(TIMESTAMP_FORMAT),\n \"endTime\": totals[\"end\"].strftime(TIMESTAMP_FORMAT),\n \"duration\": totals[\"duration\"],\n \"execution\": {\"resolvers\": totals[\"resolvers\"]},\n }\n }\n", "path": "ariadne/contrib/tracing/apollotracing.py"}]}
1,499
355
gh_patches_debug_6879
rasdani/github-patches
git_diff
conan-io__conan-center-index-5795
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [package] apr/1.7.0: consumer packages fail to find apr when used as shared ### Package and Environment Details * Package Name/Version: **apr/1.7.0** * Operating System+version: **Windows 10** * Compiler+version: **MSVC v142** * Conan version: **conan 1.35.0** * Python version: **Python 3.7.9** ### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use) ``` [settings] os=Windows os_build=Windows arch=x86 arch_build=x86_64 compiler=Visual Studio compiler.version=16 build_type=Release [options] [build_requires] [env] ``` ### Steps to reproduce (Include if Applicable) Install apr/1.7.0 as shared conan install apr/1.7.0@ -o shared=True --build=apr Try installing a consumer of apr. Trying with serf conan install serf/1.3.9@ -o apr:shared=True -o apr-util:shared=True --build=serf --build=apr-util ### Logs (Include/Attach if Applicable) <details><summary>Click to expand log</summary> ``` LINK : fatal error LNK1181: cannot open input file 'libapr-1.lib' scons: *** [libserf-1.dll] Error 1181 scons: building terminated because of errors. serf/1.3.9: serf/1.3.9: ERROR: Package 'e896ac717b33682561d2e328d65b7fe6a0bec462' build failed serf/1.3.9: WARN: Build folder C:\projects\.conan\data\serf\1.3.9\_\_\build\e896ac717b33682561d2e328d65b7fe6a0bec462 ERROR: serf/1.3.9: Error in build() method, line 111 self.run("scons {} {}".format(" ".join(escape_str(s) for s in args), " ".join("{}={}".format(k, escape_str(v)) for k, v in kwargs.items())), run_environment=True) ``` </details> Patch 0001 changes the name of the library binary from libapr-1 to apr-1 when compiled as shared. This was done so conan only compiles one library, either static or shared, not both as apr usually does. This name change affects compilation of consumer packages like serf which expects the libapr-1 name when linking the library as shared. I propose to make what apr-util does, droping the name change and make cpp_info.libs take the correct name https://github.com/cesenaLA/conan-center-index/commit/bbc084c0f09f09070ed840eb3c8239d0d9d1b152 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `recipes/apr/all/conanfile.py` Content: ``` 1 import os 2 import re 3 from conans import AutoToolsBuildEnvironment, ConanFile, CMake, tools 4 from conans.errors import ConanException, ConanInvalidConfiguration 5 6 7 class AprConan(ConanFile): 8 name = "apr" 9 description = "The Apache Portable Runtime (APR) provides a predictable and consistent interface to underlying platform-specific implementations" 10 license = "Apache-2.0" 11 topics = ("conan", "apr", "apache", "platform", "library") 12 homepage = "https://apr.apache.org/" 13 url = "https://github.com/conan-io/conan-center-index" 14 exports_sources = "CMakeLists.txt", "patches/**" 15 generators = "cmake" 16 settings = "os", "arch", "compiler", "build_type" 17 options = { 18 "shared": [True, False], 19 "fPIC": [True, False], 20 "force_apr_uuid": [True, False], 21 } 22 default_options = { 23 "shared": False, 24 "fPIC": True, 25 "force_apr_uuid": True, 26 } 27 28 _autotools = None 29 _cmake = None 30 31 def config_options(self): 32 if self.settings.os == "Windows": 33 del self.options.fPIC 34 35 def configure(self): 36 if self.options.shared: 37 del self.options.fPIC 38 del self.settings.compiler.cppstd 39 del self.settings.compiler.libcxx 40 41 if (self.settings.compiler == "apple-clang" and 42 tools.Version(self.settings.compiler.version) == "12" and 43 self.version == "1.7.0"): 44 raise ConanInvalidConfiguration("apr does not (yet) support apple-clang 12") 45 46 @property 47 def _source_subfolder(self): 48 return "source_subfolder" 49 50 @property 51 def _build_subfolder(self): 52 return "build_subfolder" 53 54 def source(self): 55 tools.get(**self.conan_data["sources"][self.version]) 56 os.rename("{}-{}".format(self.name, self.version), self._source_subfolder) 57 58 def _configure_cmake(self): 59 if self._cmake: 60 return self._cmake 61 self._cmake = CMake(self) 62 self._cmake.definitions["INSTALL_PDB"] = False 63 self._cmake.definitions["APR_BUILD_TESTAPR"] = False 64 self._cmake.configure(build_folder=self._build_subfolder) 65 return self._cmake 66 67 def _configure_autotools(self): 68 if self._autotools: 69 return self._autotools 70 self._autotools = AutoToolsBuildEnvironment(self) 71 self._autotools.libs = [] 72 yes_no = lambda v: "yes" if v else "no" 73 conf_args = [ 74 "--with-installbuilddir=${prefix}/bin/build-1", 75 "--enable-shared={}".format(yes_no(self.options.shared)), 76 "--enable-static={}".format(yes_no(not self.options.shared)), 77 ] 78 if tools.cross_building(self.settings): 79 # 80 conf_args.append("apr_cv_mutex_robust_shared=yes") 81 self._autotools.configure(args=conf_args, configure_dir=self._source_subfolder) 82 return self._autotools 83 84 def _patch_sources(self): 85 for patch in self.conan_data["patches"][self.version]: 86 tools.patch(**patch) 87 if self.options.force_apr_uuid: 88 tools.replace_in_file(os.path.join(self._source_subfolder, "include", "apr.h.in"), 89 "@osuuid@", "0") 90 91 def build(self): 92 self._patch_sources() 93 if self.settings.compiler == "Visual Studio": 94 cmake = self._configure_cmake() 95 cmake.build(target="libapr-1" if self.options.shared else "apr-1") 96 else: 97 autotools = self._configure_autotools() 98 autotools.make() 99 100 def package(self): 101 self.copy("LICENSE", dst="licenses", src=self._source_subfolder) 102 if self.settings.compiler == "Visual Studio": 103 cmake = self._configure_cmake() 104 cmake.install() 105 else: 106 autotools = self._configure_autotools() 107 autotools.install() 108 109 os.unlink(os.path.join(self.package_folder, "lib", "libapr-1.la")) 110 tools.rmdir(os.path.join(self.package_folder, "build-1")) 111 tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig")) 112 113 apr_rules_mk = os.path.join(self.package_folder, "bin", "build-1", "apr_rules.mk") 114 apr_rules_cnt = open(apr_rules_mk).read() 115 for key in ("apr_builddir", "apr_builders", "top_builddir"): 116 apr_rules_cnt, nb = re.subn("^{}=[^\n]*\n".format(key), "{}=$(_APR_BUILDDIR)\n".format(key), apr_rules_cnt, flags=re.MULTILINE) 117 if nb == 0: 118 raise ConanException("Could not find/replace {} in {}".format(key, apr_rules_mk)) 119 open(apr_rules_mk, "w").write(apr_rules_cnt) 120 121 def package_info(self): 122 self.cpp_info.names["pkg_config"] = "apr-1" 123 self.cpp_info.libs = ["apr-1"] 124 if not self.options.shared: 125 self.cpp_info.defines = ["APR_DECLARE_STATIC"] 126 if self.settings.os == "Linux": 127 self.cpp_info.system_libs = ["dl", "pthread"] 128 if self.settings.os == "Windows": 129 self.cpp_info.system_libs = ["rpcrt4"] 130 131 apr_root = self.package_folder 132 if tools.os_info.is_windows: 133 apr_root = tools.unix_path(apr_root) 134 self.output.info("Settings APR_ROOT environment var: {}".format(apr_root)) 135 self.env_info.APR_ROOT = apr_root 136 137 apr_mk_dir = os.path.join(self.package_folder, "bin", "build-1") 138 if tools.os_info.is_windows: 139 apr_mk_dir = tools.unix_path(apr_mk_dir) 140 self.env_info._APR_BUILDDIR = apr_mk_dir 141 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/recipes/apr/all/conanfile.py b/recipes/apr/all/conanfile.py --- a/recipes/apr/all/conanfile.py +++ b/recipes/apr/all/conanfile.py @@ -120,7 +120,7 @@ def package_info(self): self.cpp_info.names["pkg_config"] = "apr-1" - self.cpp_info.libs = ["apr-1"] + self.cpp_info.libs = ["libapr-1" if self.settings.compiler == "Visual Studio" and self.options.shared else "apr-1"] if not self.options.shared: self.cpp_info.defines = ["APR_DECLARE_STATIC"] if self.settings.os == "Linux":
{"golden_diff": "diff --git a/recipes/apr/all/conanfile.py b/recipes/apr/all/conanfile.py\n--- a/recipes/apr/all/conanfile.py\n+++ b/recipes/apr/all/conanfile.py\n@@ -120,7 +120,7 @@\n \n def package_info(self):\n self.cpp_info.names[\"pkg_config\"] = \"apr-1\"\n- self.cpp_info.libs = [\"apr-1\"]\n+ self.cpp_info.libs = [\"libapr-1\" if self.settings.compiler == \"Visual Studio\" and self.options.shared else \"apr-1\"]\n if not self.options.shared:\n self.cpp_info.defines = [\"APR_DECLARE_STATIC\"]\n if self.settings.os == \"Linux\":\n", "issue": "[package] apr/1.7.0: consumer packages fail to find apr when used as shared\n### Package and Environment Details\r\n * Package Name/Version: **apr/1.7.0**\r\n * Operating System+version: **Windows 10**\r\n * Compiler+version: **MSVC v142**\r\n * Conan version: **conan 1.35.0**\r\n * Python version: **Python 3.7.9**\r\n\r\n\r\n### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)\r\n```\r\n[settings]\r\nos=Windows\r\nos_build=Windows\r\narch=x86\r\narch_build=x86_64\r\ncompiler=Visual Studio\r\ncompiler.version=16\r\nbuild_type=Release\r\n[options]\r\n[build_requires]\r\n[env]\r\n```\r\n\r\n\r\n### Steps to reproduce (Include if Applicable)\r\nInstall apr/1.7.0 as shared\r\nconan install apr/1.7.0@ -o shared=True --build=apr\r\n\r\nTry installing a consumer of apr. Trying with serf\r\nconan install serf/1.3.9@ -o apr:shared=True -o apr-util:shared=True --build=serf --build=apr-util\r\n\r\n### Logs (Include/Attach if Applicable)\r\n<details><summary>Click to expand log</summary>\r\n\r\n```\r\nLINK : fatal error LNK1181: cannot open input file 'libapr-1.lib'\r\nscons: *** [libserf-1.dll] Error 1181\r\nscons: building terminated because of errors.\r\nserf/1.3.9:\r\nserf/1.3.9: ERROR: Package 'e896ac717b33682561d2e328d65b7fe6a0bec462' build failed\r\nserf/1.3.9: WARN: Build folder C:\\projects\\.conan\\data\\serf\\1.3.9\\_\\_\\build\\e896ac717b33682561d2e328d65b7fe6a0bec462\r\nERROR: serf/1.3.9: Error in build() method, line 111\r\n self.run(\"scons {} {}\".format(\" \".join(escape_str(s) for s in args), \" \".join(\"{}={}\".format(k, escape_str(v)) for k, v in kwargs.items())), run_environment=True)\r\n```\r\n\r\n</details>\r\nPatch 0001 changes the name of the library binary from libapr-1 to apr-1 when compiled as shared. This was done so conan only compiles one library, either static or shared, not both as apr usually does.\r\n\r\nThis name change affects compilation of consumer packages like serf which expects the libapr-1 name when linking the library as shared.\r\n\r\nI propose to make what apr-util does, droping the name change and make cpp_info.libs take the correct name\r\nhttps://github.com/cesenaLA/conan-center-index/commit/bbc084c0f09f09070ed840eb3c8239d0d9d1b152\n", "before_files": [{"content": "import os\nimport re\nfrom conans import AutoToolsBuildEnvironment, ConanFile, CMake, tools\nfrom conans.errors import ConanException, ConanInvalidConfiguration\n\n\nclass AprConan(ConanFile):\n name = \"apr\"\n description = \"The Apache Portable Runtime (APR) provides a predictable and consistent interface to underlying platform-specific implementations\"\n license = \"Apache-2.0\"\n topics = (\"conan\", \"apr\", \"apache\", \"platform\", \"library\")\n homepage = \"https://apr.apache.org/\"\n url = \"https://github.com/conan-io/conan-center-index\"\n exports_sources = \"CMakeLists.txt\", \"patches/**\"\n generators = \"cmake\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"force_apr_uuid\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"force_apr_uuid\": True,\n }\n\n _autotools = None\n _cmake = None\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.cppstd\n del self.settings.compiler.libcxx\n\n if (self.settings.compiler == \"apple-clang\" and\n tools.Version(self.settings.compiler.version) == \"12\" and\n self.version == \"1.7.0\"):\n raise ConanInvalidConfiguration(\"apr does not (yet) support apple-clang 12\")\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n os.rename(\"{}-{}\".format(self.name, self.version), self._source_subfolder)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.definitions[\"INSTALL_PDB\"] = False\n self._cmake.definitions[\"APR_BUILD_TESTAPR\"] = False\n self._cmake.configure(build_folder=self._build_subfolder)\n return self._cmake\n\n def _configure_autotools(self):\n if self._autotools:\n return self._autotools\n self._autotools = AutoToolsBuildEnvironment(self)\n self._autotools.libs = []\n yes_no = lambda v: \"yes\" if v else \"no\"\n conf_args = [\n \"--with-installbuilddir=${prefix}/bin/build-1\",\n \"--enable-shared={}\".format(yes_no(self.options.shared)),\n \"--enable-static={}\".format(yes_no(not self.options.shared)),\n ]\n if tools.cross_building(self.settings):\n #\n conf_args.append(\"apr_cv_mutex_robust_shared=yes\")\n self._autotools.configure(args=conf_args, configure_dir=self._source_subfolder)\n return self._autotools\n\n def _patch_sources(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n if self.options.force_apr_uuid:\n tools.replace_in_file(os.path.join(self._source_subfolder, \"include\", \"apr.h.in\"),\n \"@osuuid@\", \"0\")\n\n def build(self):\n self._patch_sources()\n if self.settings.compiler == \"Visual Studio\":\n cmake = self._configure_cmake()\n cmake.build(target=\"libapr-1\" if self.options.shared else \"apr-1\")\n else:\n autotools = self._configure_autotools()\n autotools.make()\n\n def package(self):\n self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n if self.settings.compiler == \"Visual Studio\":\n cmake = self._configure_cmake()\n cmake.install()\n else:\n autotools = self._configure_autotools()\n autotools.install()\n\n os.unlink(os.path.join(self.package_folder, \"lib\", \"libapr-1.la\"))\n tools.rmdir(os.path.join(self.package_folder, \"build-1\"))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n\n apr_rules_mk = os.path.join(self.package_folder, \"bin\", \"build-1\", \"apr_rules.mk\")\n apr_rules_cnt = open(apr_rules_mk).read()\n for key in (\"apr_builddir\", \"apr_builders\", \"top_builddir\"):\n apr_rules_cnt, nb = re.subn(\"^{}=[^\\n]*\\n\".format(key), \"{}=$(_APR_BUILDDIR)\\n\".format(key), apr_rules_cnt, flags=re.MULTILINE)\n if nb == 0:\n raise ConanException(\"Could not find/replace {} in {}\".format(key, apr_rules_mk))\n open(apr_rules_mk, \"w\").write(apr_rules_cnt)\n\n def package_info(self):\n self.cpp_info.names[\"pkg_config\"] = \"apr-1\"\n self.cpp_info.libs = [\"apr-1\"]\n if not self.options.shared:\n self.cpp_info.defines = [\"APR_DECLARE_STATIC\"]\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs = [\"dl\", \"pthread\"]\n if self.settings.os == \"Windows\":\n self.cpp_info.system_libs = [\"rpcrt4\"]\n\n apr_root = self.package_folder\n if tools.os_info.is_windows:\n apr_root = tools.unix_path(apr_root)\n self.output.info(\"Settings APR_ROOT environment var: {}\".format(apr_root))\n self.env_info.APR_ROOT = apr_root\n\n apr_mk_dir = os.path.join(self.package_folder, \"bin\", \"build-1\")\n if tools.os_info.is_windows:\n apr_mk_dir = tools.unix_path(apr_mk_dir)\n self.env_info._APR_BUILDDIR = apr_mk_dir\n", "path": "recipes/apr/all/conanfile.py"}], "after_files": [{"content": "import os\nimport re\nfrom conans import AutoToolsBuildEnvironment, ConanFile, CMake, tools\nfrom conans.errors import ConanException, ConanInvalidConfiguration\n\n\nclass AprConan(ConanFile):\n name = \"apr\"\n description = \"The Apache Portable Runtime (APR) provides a predictable and consistent interface to underlying platform-specific implementations\"\n license = \"Apache-2.0\"\n topics = (\"conan\", \"apr\", \"apache\", \"platform\", \"library\")\n homepage = \"https://apr.apache.org/\"\n url = \"https://github.com/conan-io/conan-center-index\"\n exports_sources = \"CMakeLists.txt\", \"patches/**\"\n generators = \"cmake\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"force_apr_uuid\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"force_apr_uuid\": True,\n }\n\n _autotools = None\n _cmake = None\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.cppstd\n del self.settings.compiler.libcxx\n\n if (self.settings.compiler == \"apple-clang\" and\n tools.Version(self.settings.compiler.version) == \"12\" and\n self.version == \"1.7.0\"):\n raise ConanInvalidConfiguration(\"apr does not (yet) support apple-clang 12\")\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n os.rename(\"{}-{}\".format(self.name, self.version), self._source_subfolder)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.definitions[\"INSTALL_PDB\"] = False\n self._cmake.definitions[\"APR_BUILD_TESTAPR\"] = False\n self._cmake.configure(build_folder=self._build_subfolder)\n return self._cmake\n\n def _configure_autotools(self):\n if self._autotools:\n return self._autotools\n self._autotools = AutoToolsBuildEnvironment(self)\n self._autotools.libs = []\n yes_no = lambda v: \"yes\" if v else \"no\"\n conf_args = [\n \"--with-installbuilddir=${prefix}/bin/build-1\",\n \"--enable-shared={}\".format(yes_no(self.options.shared)),\n \"--enable-static={}\".format(yes_no(not self.options.shared)),\n ]\n if tools.cross_building(self.settings):\n #\n conf_args.append(\"apr_cv_mutex_robust_shared=yes\")\n self._autotools.configure(args=conf_args, configure_dir=self._source_subfolder)\n return self._autotools\n\n def _patch_sources(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n if self.options.force_apr_uuid:\n tools.replace_in_file(os.path.join(self._source_subfolder, \"include\", \"apr.h.in\"),\n \"@osuuid@\", \"0\")\n\n def build(self):\n self._patch_sources()\n if self.settings.compiler == \"Visual Studio\":\n cmake = self._configure_cmake()\n cmake.build(target=\"libapr-1\" if self.options.shared else \"apr-1\")\n else:\n autotools = self._configure_autotools()\n autotools.make()\n\n def package(self):\n self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n if self.settings.compiler == \"Visual Studio\":\n cmake = self._configure_cmake()\n cmake.install()\n else:\n autotools = self._configure_autotools()\n autotools.install()\n\n os.unlink(os.path.join(self.package_folder, \"lib\", \"libapr-1.la\"))\n tools.rmdir(os.path.join(self.package_folder, \"build-1\"))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n\n apr_rules_mk = os.path.join(self.package_folder, \"bin\", \"build-1\", \"apr_rules.mk\")\n apr_rules_cnt = open(apr_rules_mk).read()\n for key in (\"apr_builddir\", \"apr_builders\", \"top_builddir\"):\n apr_rules_cnt, nb = re.subn(\"^{}=[^\\n]*\\n\".format(key), \"{}=$(_APR_BUILDDIR)\\n\".format(key), apr_rules_cnt, flags=re.MULTILINE)\n if nb == 0:\n raise ConanException(\"Could not find/replace {} in {}\".format(key, apr_rules_mk))\n open(apr_rules_mk, \"w\").write(apr_rules_cnt)\n\n def package_info(self):\n self.cpp_info.names[\"pkg_config\"] = \"apr-1\"\n self.cpp_info.libs = [\"libapr-1\" if self.settings.compiler == \"Visual Studio\" and self.options.shared else \"apr-1\"]\n if not self.options.shared:\n self.cpp_info.defines = [\"APR_DECLARE_STATIC\"]\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs = [\"dl\", \"pthread\"]\n if self.settings.os == \"Windows\":\n self.cpp_info.system_libs = [\"rpcrt4\"]\n\n apr_root = self.package_folder\n if tools.os_info.is_windows:\n apr_root = tools.unix_path(apr_root)\n self.output.info(\"Settings APR_ROOT environment var: {}\".format(apr_root))\n self.env_info.APR_ROOT = apr_root\n\n apr_mk_dir = os.path.join(self.package_folder, \"bin\", \"build-1\")\n if tools.os_info.is_windows:\n apr_mk_dir = tools.unix_path(apr_mk_dir)\n self.env_info._APR_BUILDDIR = apr_mk_dir\n", "path": "recipes/apr/all/conanfile.py"}]}
2,617
157
gh_patches_debug_33165
rasdani/github-patches
git_diff
CTFd__CTFd-2147
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- CSV for teams+members+fields We should have this CSV format to export but for some reason we don't. Should be an easy implementation. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `CTFd/utils/csv/__init__.py` Content: ``` 1 import csv 2 import json 3 from io import BytesIO, StringIO 4 5 from CTFd.models import ( 6 Flags, 7 Hints, 8 Tags, 9 TeamFields, 10 Teams, 11 UserFields, 12 Users, 13 db, 14 get_class_by_tablename, 15 ) 16 from CTFd.plugins.challenges import get_chal_class 17 from CTFd.schemas.challenges import ChallengeSchema 18 from CTFd.schemas.teams import TeamSchema 19 from CTFd.schemas.users import UserSchema 20 from CTFd.utils.config import is_teams_mode, is_users_mode 21 from CTFd.utils.scores import get_standings 22 23 24 def get_dumpable_tables(): 25 csv_keys = list(CSV_KEYS.keys()) 26 db_keys = list(db.metadata.tables.keys()) 27 tables = csv_keys + db_keys 28 table_keys = list(zip(tables, tables)) 29 return table_keys 30 31 32 def dump_csv(name): 33 dump_func = CSV_KEYS.get(name) 34 if dump_func: 35 return dump_func() 36 elif get_class_by_tablename(name): 37 return dump_database_table(tablename=name) 38 else: 39 raise KeyError 40 41 42 def dump_scoreboard_csv(): 43 # TODO: Add fields to scoreboard data 44 temp = StringIO() 45 writer = csv.writer(temp) 46 47 standings = get_standings() 48 49 # Get all user fields in a specific order 50 user_fields = UserFields.query.all() 51 user_field_ids = [f.id for f in user_fields] 52 user_field_names = [f.name for f in user_fields] 53 54 if is_teams_mode(): 55 team_fields = TeamFields.query.all() 56 team_field_ids = [f.id for f in team_fields] 57 team_field_names = [f.name for f in team_fields] 58 59 header = ( 60 [ 61 "place", 62 "team", 63 "team id", 64 "score", 65 "member name", 66 "member id", 67 "member email", 68 "member score", 69 ] 70 + user_field_names 71 + team_field_names 72 ) 73 writer.writerow(header) 74 75 for i, standing in enumerate(standings): 76 team = Teams.query.filter_by(id=standing.account_id).first() 77 78 # Build field entries using the order of the field values 79 team_field_entries = {f.field_id: f.value for f in team.field_entries} 80 team_field_values = [ 81 team_field_entries.get(f_id, "") for f_id in team_field_ids 82 ] 83 user_field_values = len(user_field_names) * [""] 84 team_row = ( 85 [i + 1, team.name, team.id, standing.score, "", "", "", ""] 86 + user_field_values 87 + team_field_values 88 ) 89 90 writer.writerow(team_row) 91 92 for member in team.members: 93 user_field_entries = {f.field_id: f.value for f in member.field_entries} 94 user_field_values = [ 95 user_field_entries.get(f_id, "") for f_id in user_field_ids 96 ] 97 team_field_values = len(team_field_names) * [""] 98 user_row = ( 99 [ 100 "", 101 "", 102 "", 103 "", 104 member.name, 105 member.id, 106 member.email, 107 member.score, 108 ] 109 + user_field_values 110 + team_field_values 111 ) 112 writer.writerow(user_row) 113 elif is_users_mode(): 114 header = [ 115 "place", 116 "user name", 117 "user id", 118 "user email", 119 "score", 120 ] + user_field_names 121 writer.writerow(header) 122 123 for i, standing in enumerate(standings): 124 user = Users.query.filter_by(id=standing.account_id).first() 125 126 # Build field entries using the order of the field values 127 user_field_entries = {f.field_id: f.value for f in user.field_entries} 128 user_field_values = [ 129 user_field_entries.get(f_id, "") for f_id in user_field_ids 130 ] 131 user_row = [ 132 i + 1, 133 user.name, 134 user.id, 135 user.email, 136 standing.score, 137 ] + user_field_values 138 writer.writerow(user_row) 139 140 # In Python 3 send_file requires bytes 141 output = BytesIO() 142 output.write(temp.getvalue().encode("utf-8")) 143 output.seek(0) 144 temp.close() 145 146 return output 147 148 149 def dump_users_with_fields_csv(): 150 temp = StringIO() 151 writer = csv.writer(temp) 152 153 user_fields = UserFields.query.all() 154 user_field_ids = [f.id for f in user_fields] 155 user_field_names = [f.name for f in user_fields] 156 157 header = [column.name for column in Users.__mapper__.columns] + user_field_names 158 writer.writerow(header) 159 160 responses = Users.query.all() 161 162 for curr in responses: 163 user_field_entries = {f.field_id: f.value for f in curr.field_entries} 164 user_field_values = [ 165 user_field_entries.get(f_id, "") for f_id in user_field_ids 166 ] 167 user_row = [ 168 getattr(curr, column.name) for column in Users.__mapper__.columns 169 ] + user_field_values 170 writer.writerow(user_row) 171 172 temp.seek(0) 173 174 # In Python 3 send_file requires bytes 175 output = BytesIO() 176 output.write(temp.getvalue().encode("utf-8")) 177 output.seek(0) 178 temp.close() 179 180 return output 181 182 183 def dump_teams_with_fields_csv(): 184 temp = StringIO() 185 writer = csv.writer(temp) 186 187 team_fields = TeamFields.query.all() 188 team_field_ids = [f.id for f in team_fields] 189 team_field_names = [f.name for f in team_fields] 190 191 header = [column.name for column in Teams.__mapper__.columns] + team_field_names 192 writer.writerow(header) 193 194 responses = Teams.query.all() 195 196 for curr in responses: 197 team_field_entries = {f.field_id: f.value for f in curr.field_entries} 198 team_field_values = [ 199 team_field_entries.get(f_id, "") for f_id in team_field_ids 200 ] 201 202 team_row = [ 203 getattr(curr, column.name) for column in Teams.__mapper__.columns 204 ] + team_field_values 205 206 writer.writerow(team_row) 207 208 temp.seek(0) 209 210 # In Python 3 send_file requires bytes 211 output = BytesIO() 212 output.write(temp.getvalue().encode("utf-8")) 213 output.seek(0) 214 temp.close() 215 216 return output 217 218 219 def dump_database_table(tablename): 220 # TODO: It might make sense to limit dumpable tables. Config could potentially leak sensitive information. 221 model = get_class_by_tablename(tablename) 222 223 if model is None: 224 raise KeyError("Unknown database table") 225 226 temp = StringIO() 227 writer = csv.writer(temp) 228 229 header = [column.name for column in model.__mapper__.columns] 230 writer.writerow(header) 231 232 responses = model.query.all() 233 234 for curr in responses: 235 writer.writerow( 236 [getattr(curr, column.name) for column in model.__mapper__.columns] 237 ) 238 239 temp.seek(0) 240 241 # In Python 3 send_file requires bytes 242 output = BytesIO() 243 output.write(temp.getvalue().encode("utf-8")) 244 output.seek(0) 245 temp.close() 246 247 return output 248 249 250 def load_users_csv(dict_reader): 251 schema = UserSchema() 252 errors = [] 253 for i, line in enumerate(dict_reader): 254 response = schema.load(line) 255 if response.errors: 256 errors.append((i, response.errors)) 257 else: 258 db.session.add(response.data) 259 db.session.commit() 260 if errors: 261 return errors 262 return True 263 264 265 def load_teams_csv(dict_reader): 266 schema = TeamSchema() 267 errors = [] 268 for i, line in enumerate(dict_reader): 269 response = schema.load(line) 270 if response.errors: 271 errors.append((i, response.errors)) 272 else: 273 db.session.add(response.data) 274 db.session.commit() 275 if errors: 276 return errors 277 return True 278 279 280 def load_challenges_csv(dict_reader): 281 schema = ChallengeSchema() 282 errors = [] 283 284 for i, line in enumerate(dict_reader): 285 # Throw away fields that we can't trust if provided 286 _ = line.pop("id", None) 287 _ = line.pop("requirements", None) 288 289 flags = line.pop("flags", None) 290 tags = line.pop("tags", None) 291 hints = line.pop("hints", None) 292 challenge_type = line.pop("type", "standard") 293 294 # Load in custom type_data 295 type_data = json.loads(line.pop("type_data", "{}") or "{}") 296 line.update(type_data) 297 298 response = schema.load(line) 299 if response.errors: 300 errors.append((i + 1, response.errors)) 301 continue 302 303 ChallengeClass = get_chal_class(challenge_type) 304 challenge = ChallengeClass.challenge_model(**line) 305 db.session.add(challenge) 306 db.session.commit() 307 308 if flags: 309 flags = [flag.strip() for flag in flags.split(",")] 310 for flag in flags: 311 f = Flags(type="static", challenge_id=challenge.id, content=flag,) 312 db.session.add(f) 313 db.session.commit() 314 315 if tags: 316 tags = [tag.strip() for tag in tags.split(",")] 317 for tag in tags: 318 t = Tags(challenge_id=challenge.id, value=tag,) 319 db.session.add(t) 320 db.session.commit() 321 322 if hints: 323 hints = [hint.strip() for hint in hints.split(",")] 324 for hint in hints: 325 h = Hints(challenge_id=challenge.id, content=hint,) 326 db.session.add(h) 327 db.session.commit() 328 if errors: 329 return errors 330 return True 331 332 333 CSV_KEYS = { 334 "scoreboard": dump_scoreboard_csv, 335 "users+fields": dump_users_with_fields_csv, 336 "teams+fields": dump_teams_with_fields_csv, 337 } 338 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/CTFd/utils/csv/__init__.py b/CTFd/utils/csv/__init__.py --- a/CTFd/utils/csv/__init__.py +++ b/CTFd/utils/csv/__init__.py @@ -216,6 +216,66 @@ return output +def dump_teams_with_members_fields_csv(): + temp = StringIO() + writer = csv.writer(temp) + + team_fields = TeamFields.query.all() + team_field_ids = [f.id for f in team_fields] + team_field_names = [f.name for f in team_fields] + + user_fields = UserFields.query.all() + user_field_ids = [f.id for f in user_fields] + user_field_names = [f.name for f in user_fields] + + user_header = [ + f"member_{column.name}" for column in Users.__mapper__.columns + ] + user_field_names + + header = ( + [column.name for column in Teams.__mapper__.columns] + + team_field_names + + user_header + ) + writer.writerow(header) + + responses = Teams.query.all() + + for curr in responses: + team_field_entries = {f.field_id: f.value for f in curr.field_entries} + team_field_values = [ + team_field_entries.get(f_id, "") for f_id in team_field_ids + ] + + team_row = [ + getattr(curr, column.name) for column in Teams.__mapper__.columns + ] + team_field_values + + writer.writerow(team_row) + + for member in curr.members: + padding = [""] * len(team_row) + + user_field_entries = {f.field_id: f.value for f in member.field_entries} + user_field_values = [ + user_field_entries.get(f_id, "") for f_id in user_field_ids + ] + user_row = [ + getattr(member, column.name) for column in Users.__mapper__.columns + ] + user_field_values + writer.writerow(padding + user_row) + + temp.seek(0) + + # In Python 3 send_file requires bytes + output = BytesIO() + output.write(temp.getvalue().encode("utf-8")) + output.seek(0) + temp.close() + + return output + + def dump_database_table(tablename): # TODO: It might make sense to limit dumpable tables. Config could potentially leak sensitive information. model = get_class_by_tablename(tablename) @@ -334,4 +394,5 @@ "scoreboard": dump_scoreboard_csv, "users+fields": dump_users_with_fields_csv, "teams+fields": dump_teams_with_fields_csv, + "teams+members+fields": dump_teams_with_members_fields_csv, }
{"golden_diff": "diff --git a/CTFd/utils/csv/__init__.py b/CTFd/utils/csv/__init__.py\n--- a/CTFd/utils/csv/__init__.py\n+++ b/CTFd/utils/csv/__init__.py\n@@ -216,6 +216,66 @@\n return output\n \n \n+def dump_teams_with_members_fields_csv():\n+ temp = StringIO()\n+ writer = csv.writer(temp)\n+\n+ team_fields = TeamFields.query.all()\n+ team_field_ids = [f.id for f in team_fields]\n+ team_field_names = [f.name for f in team_fields]\n+\n+ user_fields = UserFields.query.all()\n+ user_field_ids = [f.id for f in user_fields]\n+ user_field_names = [f.name for f in user_fields]\n+\n+ user_header = [\n+ f\"member_{column.name}\" for column in Users.__mapper__.columns\n+ ] + user_field_names\n+\n+ header = (\n+ [column.name for column in Teams.__mapper__.columns]\n+ + team_field_names\n+ + user_header\n+ )\n+ writer.writerow(header)\n+\n+ responses = Teams.query.all()\n+\n+ for curr in responses:\n+ team_field_entries = {f.field_id: f.value for f in curr.field_entries}\n+ team_field_values = [\n+ team_field_entries.get(f_id, \"\") for f_id in team_field_ids\n+ ]\n+\n+ team_row = [\n+ getattr(curr, column.name) for column in Teams.__mapper__.columns\n+ ] + team_field_values\n+\n+ writer.writerow(team_row)\n+\n+ for member in curr.members:\n+ padding = [\"\"] * len(team_row)\n+\n+ user_field_entries = {f.field_id: f.value for f in member.field_entries}\n+ user_field_values = [\n+ user_field_entries.get(f_id, \"\") for f_id in user_field_ids\n+ ]\n+ user_row = [\n+ getattr(member, column.name) for column in Users.__mapper__.columns\n+ ] + user_field_values\n+ writer.writerow(padding + user_row)\n+\n+ temp.seek(0)\n+\n+ # In Python 3 send_file requires bytes\n+ output = BytesIO()\n+ output.write(temp.getvalue().encode(\"utf-8\"))\n+ output.seek(0)\n+ temp.close()\n+\n+ return output\n+\n+\n def dump_database_table(tablename):\n # TODO: It might make sense to limit dumpable tables. Config could potentially leak sensitive information.\n model = get_class_by_tablename(tablename)\n@@ -334,4 +394,5 @@\n \"scoreboard\": dump_scoreboard_csv,\n \"users+fields\": dump_users_with_fields_csv,\n \"teams+fields\": dump_teams_with_fields_csv,\n+ \"teams+members+fields\": dump_teams_with_members_fields_csv,\n }\n", "issue": "CSV for teams+members+fields\nWe should have this CSV format to export but for some reason we don't. Should be an easy implementation. \n", "before_files": [{"content": "import csv\nimport json\nfrom io import BytesIO, StringIO\n\nfrom CTFd.models import (\n Flags,\n Hints,\n Tags,\n TeamFields,\n Teams,\n UserFields,\n Users,\n db,\n get_class_by_tablename,\n)\nfrom CTFd.plugins.challenges import get_chal_class\nfrom CTFd.schemas.challenges import ChallengeSchema\nfrom CTFd.schemas.teams import TeamSchema\nfrom CTFd.schemas.users import UserSchema\nfrom CTFd.utils.config import is_teams_mode, is_users_mode\nfrom CTFd.utils.scores import get_standings\n\n\ndef get_dumpable_tables():\n csv_keys = list(CSV_KEYS.keys())\n db_keys = list(db.metadata.tables.keys())\n tables = csv_keys + db_keys\n table_keys = list(zip(tables, tables))\n return table_keys\n\n\ndef dump_csv(name):\n dump_func = CSV_KEYS.get(name)\n if dump_func:\n return dump_func()\n elif get_class_by_tablename(name):\n return dump_database_table(tablename=name)\n else:\n raise KeyError\n\n\ndef dump_scoreboard_csv():\n # TODO: Add fields to scoreboard data\n temp = StringIO()\n writer = csv.writer(temp)\n\n standings = get_standings()\n\n # Get all user fields in a specific order\n user_fields = UserFields.query.all()\n user_field_ids = [f.id for f in user_fields]\n user_field_names = [f.name for f in user_fields]\n\n if is_teams_mode():\n team_fields = TeamFields.query.all()\n team_field_ids = [f.id for f in team_fields]\n team_field_names = [f.name for f in team_fields]\n\n header = (\n [\n \"place\",\n \"team\",\n \"team id\",\n \"score\",\n \"member name\",\n \"member id\",\n \"member email\",\n \"member score\",\n ]\n + user_field_names\n + team_field_names\n )\n writer.writerow(header)\n\n for i, standing in enumerate(standings):\n team = Teams.query.filter_by(id=standing.account_id).first()\n\n # Build field entries using the order of the field values\n team_field_entries = {f.field_id: f.value for f in team.field_entries}\n team_field_values = [\n team_field_entries.get(f_id, \"\") for f_id in team_field_ids\n ]\n user_field_values = len(user_field_names) * [\"\"]\n team_row = (\n [i + 1, team.name, team.id, standing.score, \"\", \"\", \"\", \"\"]\n + user_field_values\n + team_field_values\n )\n\n writer.writerow(team_row)\n\n for member in team.members:\n user_field_entries = {f.field_id: f.value for f in member.field_entries}\n user_field_values = [\n user_field_entries.get(f_id, \"\") for f_id in user_field_ids\n ]\n team_field_values = len(team_field_names) * [\"\"]\n user_row = (\n [\n \"\",\n \"\",\n \"\",\n \"\",\n member.name,\n member.id,\n member.email,\n member.score,\n ]\n + user_field_values\n + team_field_values\n )\n writer.writerow(user_row)\n elif is_users_mode():\n header = [\n \"place\",\n \"user name\",\n \"user id\",\n \"user email\",\n \"score\",\n ] + user_field_names\n writer.writerow(header)\n\n for i, standing in enumerate(standings):\n user = Users.query.filter_by(id=standing.account_id).first()\n\n # Build field entries using the order of the field values\n user_field_entries = {f.field_id: f.value for f in user.field_entries}\n user_field_values = [\n user_field_entries.get(f_id, \"\") for f_id in user_field_ids\n ]\n user_row = [\n i + 1,\n user.name,\n user.id,\n user.email,\n standing.score,\n ] + user_field_values\n writer.writerow(user_row)\n\n # In Python 3 send_file requires bytes\n output = BytesIO()\n output.write(temp.getvalue().encode(\"utf-8\"))\n output.seek(0)\n temp.close()\n\n return output\n\n\ndef dump_users_with_fields_csv():\n temp = StringIO()\n writer = csv.writer(temp)\n\n user_fields = UserFields.query.all()\n user_field_ids = [f.id for f in user_fields]\n user_field_names = [f.name for f in user_fields]\n\n header = [column.name for column in Users.__mapper__.columns] + user_field_names\n writer.writerow(header)\n\n responses = Users.query.all()\n\n for curr in responses:\n user_field_entries = {f.field_id: f.value for f in curr.field_entries}\n user_field_values = [\n user_field_entries.get(f_id, \"\") for f_id in user_field_ids\n ]\n user_row = [\n getattr(curr, column.name) for column in Users.__mapper__.columns\n ] + user_field_values\n writer.writerow(user_row)\n\n temp.seek(0)\n\n # In Python 3 send_file requires bytes\n output = BytesIO()\n output.write(temp.getvalue().encode(\"utf-8\"))\n output.seek(0)\n temp.close()\n\n return output\n\n\ndef dump_teams_with_fields_csv():\n temp = StringIO()\n writer = csv.writer(temp)\n\n team_fields = TeamFields.query.all()\n team_field_ids = [f.id for f in team_fields]\n team_field_names = [f.name for f in team_fields]\n\n header = [column.name for column in Teams.__mapper__.columns] + team_field_names\n writer.writerow(header)\n\n responses = Teams.query.all()\n\n for curr in responses:\n team_field_entries = {f.field_id: f.value for f in curr.field_entries}\n team_field_values = [\n team_field_entries.get(f_id, \"\") for f_id in team_field_ids\n ]\n\n team_row = [\n getattr(curr, column.name) for column in Teams.__mapper__.columns\n ] + team_field_values\n\n writer.writerow(team_row)\n\n temp.seek(0)\n\n # In Python 3 send_file requires bytes\n output = BytesIO()\n output.write(temp.getvalue().encode(\"utf-8\"))\n output.seek(0)\n temp.close()\n\n return output\n\n\ndef dump_database_table(tablename):\n # TODO: It might make sense to limit dumpable tables. Config could potentially leak sensitive information.\n model = get_class_by_tablename(tablename)\n\n if model is None:\n raise KeyError(\"Unknown database table\")\n\n temp = StringIO()\n writer = csv.writer(temp)\n\n header = [column.name for column in model.__mapper__.columns]\n writer.writerow(header)\n\n responses = model.query.all()\n\n for curr in responses:\n writer.writerow(\n [getattr(curr, column.name) for column in model.__mapper__.columns]\n )\n\n temp.seek(0)\n\n # In Python 3 send_file requires bytes\n output = BytesIO()\n output.write(temp.getvalue().encode(\"utf-8\"))\n output.seek(0)\n temp.close()\n\n return output\n\n\ndef load_users_csv(dict_reader):\n schema = UserSchema()\n errors = []\n for i, line in enumerate(dict_reader):\n response = schema.load(line)\n if response.errors:\n errors.append((i, response.errors))\n else:\n db.session.add(response.data)\n db.session.commit()\n if errors:\n return errors\n return True\n\n\ndef load_teams_csv(dict_reader):\n schema = TeamSchema()\n errors = []\n for i, line in enumerate(dict_reader):\n response = schema.load(line)\n if response.errors:\n errors.append((i, response.errors))\n else:\n db.session.add(response.data)\n db.session.commit()\n if errors:\n return errors\n return True\n\n\ndef load_challenges_csv(dict_reader):\n schema = ChallengeSchema()\n errors = []\n\n for i, line in enumerate(dict_reader):\n # Throw away fields that we can't trust if provided\n _ = line.pop(\"id\", None)\n _ = line.pop(\"requirements\", None)\n\n flags = line.pop(\"flags\", None)\n tags = line.pop(\"tags\", None)\n hints = line.pop(\"hints\", None)\n challenge_type = line.pop(\"type\", \"standard\")\n\n # Load in custom type_data\n type_data = json.loads(line.pop(\"type_data\", \"{}\") or \"{}\")\n line.update(type_data)\n\n response = schema.load(line)\n if response.errors:\n errors.append((i + 1, response.errors))\n continue\n\n ChallengeClass = get_chal_class(challenge_type)\n challenge = ChallengeClass.challenge_model(**line)\n db.session.add(challenge)\n db.session.commit()\n\n if flags:\n flags = [flag.strip() for flag in flags.split(\",\")]\n for flag in flags:\n f = Flags(type=\"static\", challenge_id=challenge.id, content=flag,)\n db.session.add(f)\n db.session.commit()\n\n if tags:\n tags = [tag.strip() for tag in tags.split(\",\")]\n for tag in tags:\n t = Tags(challenge_id=challenge.id, value=tag,)\n db.session.add(t)\n db.session.commit()\n\n if hints:\n hints = [hint.strip() for hint in hints.split(\",\")]\n for hint in hints:\n h = Hints(challenge_id=challenge.id, content=hint,)\n db.session.add(h)\n db.session.commit()\n if errors:\n return errors\n return True\n\n\nCSV_KEYS = {\n \"scoreboard\": dump_scoreboard_csv,\n \"users+fields\": dump_users_with_fields_csv,\n \"teams+fields\": dump_teams_with_fields_csv,\n}\n", "path": "CTFd/utils/csv/__init__.py"}], "after_files": [{"content": "import csv\nimport json\nfrom io import BytesIO, StringIO\n\nfrom CTFd.models import (\n Flags,\n Hints,\n Tags,\n TeamFields,\n Teams,\n UserFields,\n Users,\n db,\n get_class_by_tablename,\n)\nfrom CTFd.plugins.challenges import get_chal_class\nfrom CTFd.schemas.challenges import ChallengeSchema\nfrom CTFd.schemas.teams import TeamSchema\nfrom CTFd.schemas.users import UserSchema\nfrom CTFd.utils.config import is_teams_mode, is_users_mode\nfrom CTFd.utils.scores import get_standings\n\n\ndef get_dumpable_tables():\n csv_keys = list(CSV_KEYS.keys())\n db_keys = list(db.metadata.tables.keys())\n tables = csv_keys + db_keys\n table_keys = list(zip(tables, tables))\n return table_keys\n\n\ndef dump_csv(name):\n dump_func = CSV_KEYS.get(name)\n if dump_func:\n return dump_func()\n elif get_class_by_tablename(name):\n return dump_database_table(tablename=name)\n else:\n raise KeyError\n\n\ndef dump_scoreboard_csv():\n # TODO: Add fields to scoreboard data\n temp = StringIO()\n writer = csv.writer(temp)\n\n standings = get_standings()\n\n # Get all user fields in a specific order\n user_fields = UserFields.query.all()\n user_field_ids = [f.id for f in user_fields]\n user_field_names = [f.name for f in user_fields]\n\n if is_teams_mode():\n team_fields = TeamFields.query.all()\n team_field_ids = [f.id for f in team_fields]\n team_field_names = [f.name for f in team_fields]\n\n header = (\n [\n \"place\",\n \"team\",\n \"team id\",\n \"score\",\n \"member name\",\n \"member id\",\n \"member email\",\n \"member score\",\n ]\n + user_field_names\n + team_field_names\n )\n writer.writerow(header)\n\n for i, standing in enumerate(standings):\n team = Teams.query.filter_by(id=standing.account_id).first()\n\n # Build field entries using the order of the field values\n team_field_entries = {f.field_id: f.value for f in team.field_entries}\n team_field_values = [\n team_field_entries.get(f_id, \"\") for f_id in team_field_ids\n ]\n user_field_values = len(user_field_names) * [\"\"]\n team_row = (\n [i + 1, team.name, team.id, standing.score, \"\", \"\", \"\", \"\"]\n + user_field_values\n + team_field_values\n )\n\n writer.writerow(team_row)\n\n for member in team.members:\n user_field_entries = {f.field_id: f.value for f in member.field_entries}\n user_field_values = [\n user_field_entries.get(f_id, \"\") for f_id in user_field_ids\n ]\n team_field_values = len(team_field_names) * [\"\"]\n user_row = (\n [\n \"\",\n \"\",\n \"\",\n \"\",\n member.name,\n member.id,\n member.email,\n member.score,\n ]\n + user_field_values\n + team_field_values\n )\n writer.writerow(user_row)\n elif is_users_mode():\n header = [\n \"place\",\n \"user name\",\n \"user id\",\n \"user email\",\n \"score\",\n ] + user_field_names\n writer.writerow(header)\n\n for i, standing in enumerate(standings):\n user = Users.query.filter_by(id=standing.account_id).first()\n\n # Build field entries using the order of the field values\n user_field_entries = {f.field_id: f.value for f in user.field_entries}\n user_field_values = [\n user_field_entries.get(f_id, \"\") for f_id in user_field_ids\n ]\n user_row = [\n i + 1,\n user.name,\n user.id,\n user.email,\n standing.score,\n ] + user_field_values\n writer.writerow(user_row)\n\n # In Python 3 send_file requires bytes\n output = BytesIO()\n output.write(temp.getvalue().encode(\"utf-8\"))\n output.seek(0)\n temp.close()\n\n return output\n\n\ndef dump_users_with_fields_csv():\n temp = StringIO()\n writer = csv.writer(temp)\n\n user_fields = UserFields.query.all()\n user_field_ids = [f.id for f in user_fields]\n user_field_names = [f.name for f in user_fields]\n\n header = [column.name for column in Users.__mapper__.columns] + user_field_names\n writer.writerow(header)\n\n responses = Users.query.all()\n\n for curr in responses:\n user_field_entries = {f.field_id: f.value for f in curr.field_entries}\n user_field_values = [\n user_field_entries.get(f_id, \"\") for f_id in user_field_ids\n ]\n user_row = [\n getattr(curr, column.name) for column in Users.__mapper__.columns\n ] + user_field_values\n writer.writerow(user_row)\n\n temp.seek(0)\n\n # In Python 3 send_file requires bytes\n output = BytesIO()\n output.write(temp.getvalue().encode(\"utf-8\"))\n output.seek(0)\n temp.close()\n\n return output\n\n\ndef dump_teams_with_fields_csv():\n temp = StringIO()\n writer = csv.writer(temp)\n\n team_fields = TeamFields.query.all()\n team_field_ids = [f.id for f in team_fields]\n team_field_names = [f.name for f in team_fields]\n\n header = [column.name for column in Teams.__mapper__.columns] + team_field_names\n writer.writerow(header)\n\n responses = Teams.query.all()\n\n for curr in responses:\n team_field_entries = {f.field_id: f.value for f in curr.field_entries}\n team_field_values = [\n team_field_entries.get(f_id, \"\") for f_id in team_field_ids\n ]\n\n team_row = [\n getattr(curr, column.name) for column in Teams.__mapper__.columns\n ] + team_field_values\n\n writer.writerow(team_row)\n\n temp.seek(0)\n\n # In Python 3 send_file requires bytes\n output = BytesIO()\n output.write(temp.getvalue().encode(\"utf-8\"))\n output.seek(0)\n temp.close()\n\n return output\n\n\ndef dump_teams_with_members_fields_csv():\n temp = StringIO()\n writer = csv.writer(temp)\n\n team_fields = TeamFields.query.all()\n team_field_ids = [f.id for f in team_fields]\n team_field_names = [f.name for f in team_fields]\n\n user_fields = UserFields.query.all()\n user_field_ids = [f.id for f in user_fields]\n user_field_names = [f.name for f in user_fields]\n\n user_header = [\n f\"member_{column.name}\" for column in Users.__mapper__.columns\n ] + user_field_names\n\n header = (\n [column.name for column in Teams.__mapper__.columns]\n + team_field_names\n + user_header\n )\n writer.writerow(header)\n\n responses = Teams.query.all()\n\n for curr in responses:\n team_field_entries = {f.field_id: f.value for f in curr.field_entries}\n team_field_values = [\n team_field_entries.get(f_id, \"\") for f_id in team_field_ids\n ]\n\n team_row = [\n getattr(curr, column.name) for column in Teams.__mapper__.columns\n ] + team_field_values\n\n writer.writerow(team_row)\n\n for member in curr.members:\n padding = [\"\"] * len(team_row)\n\n user_field_entries = {f.field_id: f.value for f in member.field_entries}\n user_field_values = [\n user_field_entries.get(f_id, \"\") for f_id in user_field_ids\n ]\n user_row = [\n getattr(member, column.name) for column in Users.__mapper__.columns\n ] + user_field_values\n writer.writerow(padding + user_row)\n\n temp.seek(0)\n\n # In Python 3 send_file requires bytes\n output = BytesIO()\n output.write(temp.getvalue().encode(\"utf-8\"))\n output.seek(0)\n temp.close()\n\n return output\n\n\ndef dump_database_table(tablename):\n # TODO: It might make sense to limit dumpable tables. Config could potentially leak sensitive information.\n model = get_class_by_tablename(tablename)\n\n if model is None:\n raise KeyError(\"Unknown database table\")\n\n temp = StringIO()\n writer = csv.writer(temp)\n\n header = [column.name for column in model.__mapper__.columns]\n writer.writerow(header)\n\n responses = model.query.all()\n\n for curr in responses:\n writer.writerow(\n [getattr(curr, column.name) for column in model.__mapper__.columns]\n )\n\n temp.seek(0)\n\n # In Python 3 send_file requires bytes\n output = BytesIO()\n output.write(temp.getvalue().encode(\"utf-8\"))\n output.seek(0)\n temp.close()\n\n return output\n\n\ndef load_users_csv(dict_reader):\n schema = UserSchema()\n errors = []\n for i, line in enumerate(dict_reader):\n response = schema.load(line)\n if response.errors:\n errors.append((i, response.errors))\n else:\n db.session.add(response.data)\n db.session.commit()\n if errors:\n return errors\n return True\n\n\ndef load_teams_csv(dict_reader):\n schema = TeamSchema()\n errors = []\n for i, line in enumerate(dict_reader):\n response = schema.load(line)\n if response.errors:\n errors.append((i, response.errors))\n else:\n db.session.add(response.data)\n db.session.commit()\n if errors:\n return errors\n return True\n\n\ndef load_challenges_csv(dict_reader):\n schema = ChallengeSchema()\n errors = []\n\n for i, line in enumerate(dict_reader):\n # Throw away fields that we can't trust if provided\n _ = line.pop(\"id\", None)\n _ = line.pop(\"requirements\", None)\n\n flags = line.pop(\"flags\", None)\n tags = line.pop(\"tags\", None)\n hints = line.pop(\"hints\", None)\n challenge_type = line.pop(\"type\", \"standard\")\n\n # Load in custom type_data\n type_data = json.loads(line.pop(\"type_data\", \"{}\") or \"{}\")\n line.update(type_data)\n\n response = schema.load(line)\n if response.errors:\n errors.append((i + 1, response.errors))\n continue\n\n ChallengeClass = get_chal_class(challenge_type)\n challenge = ChallengeClass.challenge_model(**line)\n db.session.add(challenge)\n db.session.commit()\n\n if flags:\n flags = [flag.strip() for flag in flags.split(\",\")]\n for flag in flags:\n f = Flags(type=\"static\", challenge_id=challenge.id, content=flag,)\n db.session.add(f)\n db.session.commit()\n\n if tags:\n tags = [tag.strip() for tag in tags.split(\",\")]\n for tag in tags:\n t = Tags(challenge_id=challenge.id, value=tag,)\n db.session.add(t)\n db.session.commit()\n\n if hints:\n hints = [hint.strip() for hint in hints.split(\",\")]\n for hint in hints:\n h = Hints(challenge_id=challenge.id, content=hint,)\n db.session.add(h)\n db.session.commit()\n if errors:\n return errors\n return True\n\n\nCSV_KEYS = {\n \"scoreboard\": dump_scoreboard_csv,\n \"users+fields\": dump_users_with_fields_csv,\n \"teams+fields\": dump_teams_with_fields_csv,\n \"teams+members+fields\": dump_teams_with_members_fields_csv,\n}\n", "path": "CTFd/utils/csv/__init__.py"}]}
3,320
627
gh_patches_debug_14538
rasdani/github-patches
git_diff
airctic__icevision-189
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- helper function for COCO parser ## 🚀 Feature **Is your feature request related to a problem? Please describe.** As described in #79, it's currently very verbose to create a COCO parser, the steps should be ideally be reduced to a single function call. **Dream API** ```python parser = datasets.coco.parser( annotations_file="path_to_annotations.json", img_dir="path_to_images_dir", ) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mantisshrimp/datasets/coco/parsers.py` Content: ``` 1 __all__ = ["COCOImageInfoParser", "COCOAnnotationParser"] 2 3 from mantisshrimp.imports import * 4 from mantisshrimp.core import * 5 from mantisshrimp.parsers.defaults import * 6 from mantisshrimp.parsers.mixins import * 7 8 9 class COCOImageInfoParser(DefaultImageInfoParser): 10 def __init__(self, infos, img_dir): 11 super().__init__() 12 self.infos = infos 13 self.img_dir = img_dir 14 15 def __iter__(self): 16 yield from self.infos 17 18 def __len__(self): 19 return len(self.infos) 20 21 def imageid(self, o) -> int: 22 return o["id"] 23 24 def filepath(self, o) -> Union[str, Path]: 25 return self.img_dir / o["file_name"] 26 27 def height(self, o) -> int: 28 return o["height"] 29 30 def width(self, o) -> int: 31 return o["width"] 32 33 34 class COCOAnnotationParser(MaskRCNNParser, AreasParserMixin, IsCrowdsParserMixin): 35 def __init__(self, annotations: list): 36 self.annotations = annotations 37 38 def __iter__(self): 39 yield from self.annotations 40 41 def __len__(self): 42 return len(self.annotations) 43 44 def imageid(self, o) -> int: 45 return o["image_id"] 46 47 def labels(self, o) -> List[int]: 48 return [o["category_id"]] 49 50 def bboxes(self, o) -> List[BBox]: 51 return [BBox.from_xywh(*o["bbox"])] 52 53 def areas(self, o) -> List[float]: 54 return [o["area"]] 55 56 def masks(self, o) -> List[MaskArray]: 57 seg = o["segmentation"] 58 if o["iscrowd"]: 59 return [RLE.from_coco(seg["counts"])] 60 else: 61 return [Polygon(seg)] 62 63 def iscrowds(self, o) -> List[bool]: 64 return [o["iscrowd"]] 65 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mantisshrimp/datasets/coco/parsers.py b/mantisshrimp/datasets/coco/parsers.py --- a/mantisshrimp/datasets/coco/parsers.py +++ b/mantisshrimp/datasets/coco/parsers.py @@ -1,9 +1,23 @@ -__all__ = ["COCOImageInfoParser", "COCOAnnotationParser"] +__all__ = ["parser", "COCOImageInfoParser", "COCOAnnotationParser"] from mantisshrimp.imports import * from mantisshrimp.core import * -from mantisshrimp.parsers.defaults import * -from mantisshrimp.parsers.mixins import * +from mantisshrimp.parsers import * + + +def parser( + annotations_file: Union[str, Path], img_dir: Union[str, Path] +) -> ParserInterface: + annotations_dict = json.loads(Path(annotations_file).read()) + + image_info_parser = COCOImageInfoParser( + infos=annotations_dict["images"], img_dir=img_dir + ) + annotations_parser = COCOAnnotationParser( + annotations=annotations_dict["annotations"] + ) + + return CombinedParser(image_info_parser, annotations_parser) class COCOImageInfoParser(DefaultImageInfoParser):
{"golden_diff": "diff --git a/mantisshrimp/datasets/coco/parsers.py b/mantisshrimp/datasets/coco/parsers.py\n--- a/mantisshrimp/datasets/coco/parsers.py\n+++ b/mantisshrimp/datasets/coco/parsers.py\n@@ -1,9 +1,23 @@\n-__all__ = [\"COCOImageInfoParser\", \"COCOAnnotationParser\"]\n+__all__ = [\"parser\", \"COCOImageInfoParser\", \"COCOAnnotationParser\"]\n \n from mantisshrimp.imports import *\n from mantisshrimp.core import *\n-from mantisshrimp.parsers.defaults import *\n-from mantisshrimp.parsers.mixins import *\n+from mantisshrimp.parsers import *\n+\n+\n+def parser(\n+ annotations_file: Union[str, Path], img_dir: Union[str, Path]\n+) -> ParserInterface:\n+ annotations_dict = json.loads(Path(annotations_file).read())\n+\n+ image_info_parser = COCOImageInfoParser(\n+ infos=annotations_dict[\"images\"], img_dir=img_dir\n+ )\n+ annotations_parser = COCOAnnotationParser(\n+ annotations=annotations_dict[\"annotations\"]\n+ )\n+\n+ return CombinedParser(image_info_parser, annotations_parser)\n \n \n class COCOImageInfoParser(DefaultImageInfoParser):\n", "issue": "helper function for COCO parser\n## \ud83d\ude80 Feature\r\n**Is your feature request related to a problem? Please describe.**\r\nAs described in #79, it's currently very verbose to create a COCO parser, the steps should be ideally be reduced to a single function call.\r\n\r\n**Dream API**\r\n```python\r\n parser = datasets.coco.parser(\r\n annotations_file=\"path_to_annotations.json\",\r\n img_dir=\"path_to_images_dir\",\r\n )\r\n```\r\n\n", "before_files": [{"content": "__all__ = [\"COCOImageInfoParser\", \"COCOAnnotationParser\"]\n\nfrom mantisshrimp.imports import *\nfrom mantisshrimp.core import *\nfrom mantisshrimp.parsers.defaults import *\nfrom mantisshrimp.parsers.mixins import *\n\n\nclass COCOImageInfoParser(DefaultImageInfoParser):\n def __init__(self, infos, img_dir):\n super().__init__()\n self.infos = infos\n self.img_dir = img_dir\n\n def __iter__(self):\n yield from self.infos\n\n def __len__(self):\n return len(self.infos)\n\n def imageid(self, o) -> int:\n return o[\"id\"]\n\n def filepath(self, o) -> Union[str, Path]:\n return self.img_dir / o[\"file_name\"]\n\n def height(self, o) -> int:\n return o[\"height\"]\n\n def width(self, o) -> int:\n return o[\"width\"]\n\n\nclass COCOAnnotationParser(MaskRCNNParser, AreasParserMixin, IsCrowdsParserMixin):\n def __init__(self, annotations: list):\n self.annotations = annotations\n\n def __iter__(self):\n yield from self.annotations\n\n def __len__(self):\n return len(self.annotations)\n\n def imageid(self, o) -> int:\n return o[\"image_id\"]\n\n def labels(self, o) -> List[int]:\n return [o[\"category_id\"]]\n\n def bboxes(self, o) -> List[BBox]:\n return [BBox.from_xywh(*o[\"bbox\"])]\n\n def areas(self, o) -> List[float]:\n return [o[\"area\"]]\n\n def masks(self, o) -> List[MaskArray]:\n seg = o[\"segmentation\"]\n if o[\"iscrowd\"]:\n return [RLE.from_coco(seg[\"counts\"])]\n else:\n return [Polygon(seg)]\n\n def iscrowds(self, o) -> List[bool]:\n return [o[\"iscrowd\"]]\n", "path": "mantisshrimp/datasets/coco/parsers.py"}], "after_files": [{"content": "__all__ = [\"parser\", \"COCOImageInfoParser\", \"COCOAnnotationParser\"]\n\nfrom mantisshrimp.imports import *\nfrom mantisshrimp.core import *\nfrom mantisshrimp.parsers import *\n\n\ndef parser(\n annotations_file: Union[str, Path], img_dir: Union[str, Path]\n) -> ParserInterface:\n annotations_dict = json.loads(Path(annotations_file).read())\n\n image_info_parser = COCOImageInfoParser(\n infos=annotations_dict[\"images\"], img_dir=img_dir\n )\n annotations_parser = COCOAnnotationParser(\n annotations=annotations_dict[\"annotations\"]\n )\n\n return CombinedParser(image_info_parser, annotations_parser)\n\n\nclass COCOImageInfoParser(DefaultImageInfoParser):\n def __init__(self, infos, img_dir):\n super().__init__()\n self.infos = infos\n self.img_dir = img_dir\n\n def __iter__(self):\n yield from self.infos\n\n def __len__(self):\n return len(self.infos)\n\n def imageid(self, o) -> int:\n return o[\"id\"]\n\n def filepath(self, o) -> Union[str, Path]:\n return self.img_dir / o[\"file_name\"]\n\n def height(self, o) -> int:\n return o[\"height\"]\n\n def width(self, o) -> int:\n return o[\"width\"]\n\n\nclass COCOAnnotationParser(MaskRCNNParser, AreasParserMixin, IsCrowdsParserMixin):\n def __init__(self, annotations: list):\n self.annotations = annotations\n\n def __iter__(self):\n yield from self.annotations\n\n def __len__(self):\n return len(self.annotations)\n\n def imageid(self, o) -> int:\n return o[\"image_id\"]\n\n def labels(self, o) -> List[int]:\n return [o[\"category_id\"]]\n\n def bboxes(self, o) -> List[BBox]:\n return [BBox.from_xywh(*o[\"bbox\"])]\n\n def areas(self, o) -> List[float]:\n return [o[\"area\"]]\n\n def masks(self, o) -> List[MaskArray]:\n seg = o[\"segmentation\"]\n if o[\"iscrowd\"]:\n return [RLE.from_coco(seg[\"counts\"])]\n else:\n return [Polygon(seg)]\n\n def iscrowds(self, o) -> List[bool]:\n return [o[\"iscrowd\"]]\n", "path": "mantisshrimp/datasets/coco/parsers.py"}]}
921
276
gh_patches_debug_707
rasdani/github-patches
git_diff
saulpw__visidata-2269
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- save as csv actually saves tsv when file existing file extension is CSV, i.e. uppercase - PR available #2269 **Small description** Slightly hperbolic ;-) Corruption of file format, opening a csv but saving as csv results in TSV data in a file named csv **Expected result** CSV, not TSV **Actual result with screenshot** If you get an unexpected error, please include the full stack trace that you get with `Ctrl-E`. No error, contents: header1 header2 1 one 2 two **Steps to reproduce with sample data and a .vd** Datafile, called bug.CSV header1,header2 1,one 2,two 1. open data file, MUST have uppercase CSV on end (works fine for lower). E.g., `visidata bug.CSV` 2. save (ctrl-s) 3. hit enter to accept current filename 4. hit `y` to overwrite 5. Display will say saving TSV 6. sanity check file contents First try reproducing without any user configuration by using the flag `-N`. e.g. `echo "abc" | vd -f txt -N` Please attach the commandlog (saved with `Ctrl-D`) to show the steps that led to the issue. See [here](http://visidata.org/docs/save-restore/) for more details. **Additional context** Please include the version of VisiData and Python. Windows: (py311csv) C:\code\py>python Python 3.11.3 (tags/v3.11.3:f3909b8, Apr 4 2023, 23:49:59) [MSC v.1934 64 bit (AMD64)] on win32 Type "help", "copyright", "credits" or "license" for more information. >>> ^Z (py311csv) C:\code\py>visidata --version saul.pw/VisiData v3.0.2 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `visidata/save.py` Content: ``` 1 import collections 2 import os 3 from copy import copy 4 5 from visidata import vd 6 from visidata import Sheet, BaseSheet, VisiData, IndexSheet, Path, Progress, TypedExceptionWrapper 7 8 vd.option('safe_error', '#ERR', 'error string to use while saving', replay=True) 9 vd.option('save_encoding', 'utf-8', 'encoding passed to codecs.open when saving a file', replay=True, help=vd.help_encoding) 10 11 @Sheet.api 12 def safe_trdict(vs): 13 'returns string.translate dictionary for replacing tabs and newlines' 14 if vs.options.safety_first: 15 delim = vs.options.delimiter 16 return { 17 0: '', # strip NUL completely 18 ord(delim): vs.options.tsv_safe_tab, # \t 19 10: vs.options.tsv_safe_newline, # \n 20 13: vs.options.tsv_safe_newline, # \r 21 } 22 return {} 23 24 25 @Sheet.api 26 def iterdispvals(sheet, *cols, format=False): 27 'For each row in sheet, yield OrderedDict of values for given cols. Values are typed if format=False, or a formatted display string if format=True.' 28 if not cols: 29 cols = sheet.visibleCols 30 31 transformers = collections.OrderedDict() # list of transformers for each column in order 32 for col in cols: 33 transformers[col] = [ col.type ] 34 if format: 35 formatMaker = getattr(col, 'formatter_'+(col.formatter or sheet.options.disp_formatter)) 36 transformers[col].append(formatMaker(col._formatdict)) 37 trdict = sheet.safe_trdict() 38 if trdict: 39 transformers[col].append(lambda v,trdict=trdict: v.translate(trdict)) 40 41 options_safe_error = sheet.options.safe_error 42 for r in Progress(sheet.rows): 43 dispvals = collections.OrderedDict() # [col] -> value 44 for col, transforms in transformers.items(): 45 try: 46 dispval = col.getValue(r) 47 48 except Exception as e: 49 vd.exceptionCaught(e) 50 dispval = options_safe_error or str(e) 51 52 try: 53 for t in transforms: 54 if dispval is None: 55 break 56 elif isinstance(dispval, TypedExceptionWrapper): 57 dispval = options_safe_error or str(dispval) 58 break 59 else: 60 dispval = t(dispval) 61 62 if dispval is None and format: 63 dispval = '' 64 except Exception as e: 65 dispval = str(dispval) 66 67 dispvals[col] = dispval 68 69 yield dispvals 70 71 72 @Sheet.api 73 def itervals(sheet, *cols, format=False): 74 for row in sheet.iterdispvals(*cols, format=format): 75 yield [row[c] for c in cols] 76 77 @BaseSheet.api 78 def getDefaultSaveName(sheet): 79 src = getattr(sheet, 'source', None) 80 if hasattr(src, 'scheme') and src.scheme: 81 return src.name + src.suffix 82 if isinstance(src, Path): 83 if sheet.options.is_set('save_filetype', sheet): 84 # if save_filetype is over-ridden from default, use it as the extension 85 return str(src.with_suffix('')) + '.' + sheet.options.save_filetype 86 return str(src) 87 else: 88 return sheet.name+'.'+getattr(sheet, 'filetype', sheet.options.save_filetype) 89 90 91 @VisiData.api 92 def save_cols(vd, cols): 93 sheet = cols[0].sheet 94 vs = copy(sheet) 95 vs.columns = list(cols) 96 vs.rows = sheet.rows 97 if len(cols) == 1: 98 savedcoltxt = cols[0].name + ' column' 99 else: 100 savedcoltxt = '%s columns' % len(cols) 101 path = vd.inputPath('save %s to: ' % savedcoltxt, value=vs.getDefaultSaveName()) 102 vd.saveSheets(path, vs) 103 104 105 @VisiData.api 106 def saveSheets(vd, givenpath, *vsheets, confirm_overwrite=True): 107 'Save all *vsheets* to *givenpath*.' 108 109 if not vsheets: # blank tuple 110 vd.warning('no sheets to save') 111 return 112 113 filetypes = [givenpath.ext, vd.options.save_filetype] 114 115 vd.clearCaches() 116 117 for ft in filetypes: 118 savefunc = getattr(vsheets[0], 'save_' + ft, None) or getattr(vd, 'save_' + ft, None) 119 if savefunc: 120 filetype = ft 121 break 122 123 if savefunc is None: 124 vd.fail(f'no function to save as {filetype}') 125 126 if confirm_overwrite: 127 vd.confirmOverwrite(givenpath) 128 129 vd.status('saving %s sheets to %s as %s' % (len(vsheets), givenpath.given, filetype)) 130 131 if not givenpath.given.endswith('/'): # forcibly specify save individual files into directory by ending path with / 132 for vs in vsheets: 133 vs.hasBeenModified = False 134 # savefuncs(vd, p, *vsheets) will have 2 argcount (*vsheets does not get counted as an arg) 135 # savefuncs(vd, p, vs) will have 3 argcount (vs counts as an arg, along with vd, path) 136 if savefunc.__code__.co_argcount == 3 and len(vsheets) > 1: 137 vd.fail(f'cannot save multiple {filetype} sheets to non-dir') 138 return vd.execAsync(savefunc, givenpath, *vsheets) 139 140 # path is a dir 141 142 # save as individual files in the givenpath directory 143 try: 144 os.makedirs(givenpath, exist_ok=True) 145 except FileExistsError: 146 pass 147 148 if not givenpath.is_dir(): 149 vd.fail(f'cannot save multiple {filetype} sheets to non-dir') 150 151 def _savefiles(vsheets, givenpath, savefunc, filetype): 152 for vs in vsheets: 153 p = Path((givenpath / vs.name).with_suffix('.'+filetype)) 154 savefunc(p, vs) 155 vs.hasBeenModified = False 156 157 vd.status(f'{givenpath} save finished') #2157 158 159 return vd.execAsync(_savefiles, vsheets, givenpath, savefunc, filetype) 160 161 162 @VisiData.api 163 def save_zip(vd, p, *vsheets): 164 vd.clearCaches() 165 166 import tempfile 167 import zipfile 168 with tempfile.TemporaryDirectory() as tmpdir: 169 with zipfile.ZipFile(str(p), 'w', zipfile.ZIP_DEFLATED, allowZip64=True, compresslevel=9) as zfp: 170 for vs in Progress(vsheets): 171 filetype = vs.options.save_filetype 172 tmpp = Path(f'{tmpdir}{vs.name}.{filetype}') 173 savefunc = getattr(vs, 'save_' + filetype, None) or getattr(vd, 'save_' + filetype, None) 174 savefunc(tmpp, vs) 175 zfp.write(tmpp, f'{vs.name}.{vs.options.save_filetype}') 176 177 178 @VisiData.api 179 def save_txt(vd, p, *vsheets): 180 if len(vsheets) == 1 and vsheets[0].nVisibleCols > 1: #2173 181 return vd.save_tsv(p, vsheets[0]) 182 183 with p.open(mode='w', encoding=vsheets[0].options.save_encoding) as fp: 184 for vs in vsheets: 185 unitsep = vs.options.delimiter 186 rowsep = vs.options.row_delimiter 187 for dispvals in vs.iterdispvals(*vs.visibleCols, format=True): 188 fp.write(unitsep.join(dispvals.values())) 189 fp.write(rowsep) 190 191 192 @BaseSheet.api 193 def rootSheet(sheet): 194 r = sheet 195 while isinstance(r.source, BaseSheet): 196 r = r.source 197 198 return r 199 200 201 BaseSheet.addCommand('^S', 'save-sheet', 'vd.saveSheets(inputPath("save to: ", value=getDefaultSaveName()), sheet)', 'save current sheet to filename in format determined by extension (default .tsv)') 202 BaseSheet.addCommand('', 'save-sheet-really', 'vd.saveSheets(Path(getDefaultSaveName()), sheet, confirm_overwrite=False)', 'save current sheet without asking for filename or confirmation') 203 BaseSheet.addCommand('', 'save-source', 'vd.saveSheets(rootSheet().source, rootSheet())', 'save root sheet to its source') 204 BaseSheet.addCommand('g^S', 'save-all', 'vd.saveSheets(inputPath("save all sheets to: "), *vd.stackedSheets)', 'save all sheets to given file or directory)') 205 IndexSheet.addCommand('g^S', 'save-selected', 'vd.saveSheets(inputPath("save %d sheets to: " % nSelectedRows, value="_".join(getattr(vs, "name", None) or "blank" for vs in selectedRows)), *selectedRows)', 'save all selected sheets to given file or directory') 206 Sheet.addCommand('', 'save-col', 'save_cols([cursorCol])', 'save current column only to filename in format determined by extension (default .tsv)') 207 Sheet.addCommand('', 'save-col-keys', 'save_cols(keyCols + [cursorCol])', 'save key columns and current column to filename in format determined by extension (default .tsv)') 208 209 vd.addMenuItems(''' 210 File > Save > current sheet > save-sheet 211 File > Save > all sheets > save-all 212 File > Save > current column > save-col 213 File > Save > keys and current column > save-col-keys 214 ''') 215 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/visidata/save.py b/visidata/save.py --- a/visidata/save.py +++ b/visidata/save.py @@ -110,7 +110,7 @@ vd.warning('no sheets to save') return - filetypes = [givenpath.ext, vd.options.save_filetype] + filetypes = [givenpath.ext.lower(), vd.options.save_filetype.lower()] vd.clearCaches()
{"golden_diff": "diff --git a/visidata/save.py b/visidata/save.py\n--- a/visidata/save.py\n+++ b/visidata/save.py\n@@ -110,7 +110,7 @@\n vd.warning('no sheets to save')\n return\n \n- filetypes = [givenpath.ext, vd.options.save_filetype]\n+ filetypes = [givenpath.ext.lower(), vd.options.save_filetype.lower()]\n \n vd.clearCaches()\n", "issue": "save as csv actually saves tsv when file existing file extension is CSV, i.e. uppercase - PR available #2269\n**Small description**\r\n\r\nSlightly hperbolic ;-) Corruption of file format, opening a csv but saving as csv results in TSV data in a file named csv\r\n\r\n**Expected result**\r\n\r\nCSV, not TSV\r\n\r\n**Actual result with screenshot**\r\n\r\nIf you get an unexpected error, please include the full stack trace that you get with `Ctrl-E`.\r\nNo error, contents:\r\n\r\n header1\theader2\r\n 1\tone\r\n 2\ttwo\r\n\r\n\r\n**Steps to reproduce with sample data and a .vd**\r\n\r\nDatafile, called bug.CSV\r\n\r\n header1,header2\r\n 1,one\r\n 2,two\r\n\r\n\r\n1. open data file, MUST have uppercase CSV on end (works fine for lower). E.g., `visidata bug.CSV`\r\n2. save (ctrl-s)\r\n3. hit enter to accept current filename\r\n4. hit `y` to overwrite\r\n5. Display will say saving TSV\r\n6. sanity check file contents\r\n\r\nFirst try reproducing without any user configuration by using the flag `-N`.\r\ne.g. `echo \"abc\" | vd -f txt -N`\r\n\r\nPlease attach the commandlog (saved with `Ctrl-D`) to show the steps that led to the issue.\r\nSee [here](http://visidata.org/docs/save-restore/) for more details.\r\n\r\n**Additional context**\r\nPlease include the version of VisiData and Python.\r\n\r\nWindows:\r\n\r\n (py311csv) C:\\code\\py>python\r\n Python 3.11.3 (tags/v3.11.3:f3909b8, Apr 4 2023, 23:49:59) [MSC v.1934 64 bit (AMD64)] on win32\r\n Type \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n >>> ^Z\r\n\r\n\r\n (py311csv) C:\\code\\py>visidata --version\r\n saul.pw/VisiData v3.0.2\r\n\r\n\n", "before_files": [{"content": "import collections\nimport os\nfrom copy import copy\n\nfrom visidata import vd\nfrom visidata import Sheet, BaseSheet, VisiData, IndexSheet, Path, Progress, TypedExceptionWrapper\n\nvd.option('safe_error', '#ERR', 'error string to use while saving', replay=True)\nvd.option('save_encoding', 'utf-8', 'encoding passed to codecs.open when saving a file', replay=True, help=vd.help_encoding)\n\[email protected]\ndef safe_trdict(vs):\n 'returns string.translate dictionary for replacing tabs and newlines'\n if vs.options.safety_first:\n delim = vs.options.delimiter\n return {\n 0: '', # strip NUL completely\n ord(delim): vs.options.tsv_safe_tab, # \\t\n 10: vs.options.tsv_safe_newline, # \\n\n 13: vs.options.tsv_safe_newline, # \\r\n }\n return {}\n\n\[email protected]\ndef iterdispvals(sheet, *cols, format=False):\n 'For each row in sheet, yield OrderedDict of values for given cols. Values are typed if format=False, or a formatted display string if format=True.'\n if not cols:\n cols = sheet.visibleCols\n\n transformers = collections.OrderedDict() # list of transformers for each column in order\n for col in cols:\n transformers[col] = [ col.type ]\n if format:\n formatMaker = getattr(col, 'formatter_'+(col.formatter or sheet.options.disp_formatter))\n transformers[col].append(formatMaker(col._formatdict))\n trdict = sheet.safe_trdict()\n if trdict:\n transformers[col].append(lambda v,trdict=trdict: v.translate(trdict))\n\n options_safe_error = sheet.options.safe_error\n for r in Progress(sheet.rows):\n dispvals = collections.OrderedDict() # [col] -> value\n for col, transforms in transformers.items():\n try:\n dispval = col.getValue(r)\n\n except Exception as e:\n vd.exceptionCaught(e)\n dispval = options_safe_error or str(e)\n\n try:\n for t in transforms:\n if dispval is None:\n break\n elif isinstance(dispval, TypedExceptionWrapper):\n dispval = options_safe_error or str(dispval)\n break\n else:\n dispval = t(dispval)\n\n if dispval is None and format:\n dispval = ''\n except Exception as e:\n dispval = str(dispval)\n\n dispvals[col] = dispval\n\n yield dispvals\n\n\[email protected]\ndef itervals(sheet, *cols, format=False):\n for row in sheet.iterdispvals(*cols, format=format):\n yield [row[c] for c in cols]\n\[email protected]\ndef getDefaultSaveName(sheet):\n src = getattr(sheet, 'source', None)\n if hasattr(src, 'scheme') and src.scheme:\n return src.name + src.suffix\n if isinstance(src, Path):\n if sheet.options.is_set('save_filetype', sheet):\n # if save_filetype is over-ridden from default, use it as the extension\n return str(src.with_suffix('')) + '.' + sheet.options.save_filetype\n return str(src)\n else:\n return sheet.name+'.'+getattr(sheet, 'filetype', sheet.options.save_filetype)\n\n\[email protected]\ndef save_cols(vd, cols):\n sheet = cols[0].sheet\n vs = copy(sheet)\n vs.columns = list(cols)\n vs.rows = sheet.rows\n if len(cols) == 1:\n savedcoltxt = cols[0].name + ' column'\n else:\n savedcoltxt = '%s columns' % len(cols)\n path = vd.inputPath('save %s to: ' % savedcoltxt, value=vs.getDefaultSaveName())\n vd.saveSheets(path, vs)\n\n\[email protected]\ndef saveSheets(vd, givenpath, *vsheets, confirm_overwrite=True):\n 'Save all *vsheets* to *givenpath*.'\n\n if not vsheets: # blank tuple\n vd.warning('no sheets to save')\n return\n\n filetypes = [givenpath.ext, vd.options.save_filetype]\n\n vd.clearCaches()\n\n for ft in filetypes:\n savefunc = getattr(vsheets[0], 'save_' + ft, None) or getattr(vd, 'save_' + ft, None)\n if savefunc:\n filetype = ft\n break\n\n if savefunc is None:\n vd.fail(f'no function to save as {filetype}')\n\n if confirm_overwrite:\n vd.confirmOverwrite(givenpath)\n\n vd.status('saving %s sheets to %s as %s' % (len(vsheets), givenpath.given, filetype))\n\n if not givenpath.given.endswith('/'): # forcibly specify save individual files into directory by ending path with /\n for vs in vsheets:\n vs.hasBeenModified = False\n # savefuncs(vd, p, *vsheets) will have 2 argcount (*vsheets does not get counted as an arg)\n # savefuncs(vd, p, vs) will have 3 argcount (vs counts as an arg, along with vd, path)\n if savefunc.__code__.co_argcount == 3 and len(vsheets) > 1:\n vd.fail(f'cannot save multiple {filetype} sheets to non-dir')\n return vd.execAsync(savefunc, givenpath, *vsheets)\n\n # path is a dir\n\n # save as individual files in the givenpath directory\n try:\n os.makedirs(givenpath, exist_ok=True)\n except FileExistsError:\n pass\n\n if not givenpath.is_dir():\n vd.fail(f'cannot save multiple {filetype} sheets to non-dir')\n\n def _savefiles(vsheets, givenpath, savefunc, filetype):\n for vs in vsheets:\n p = Path((givenpath / vs.name).with_suffix('.'+filetype))\n savefunc(p, vs)\n vs.hasBeenModified = False\n\n vd.status(f'{givenpath} save finished') #2157\n\n return vd.execAsync(_savefiles, vsheets, givenpath, savefunc, filetype)\n\n\[email protected]\ndef save_zip(vd, p, *vsheets):\n vd.clearCaches()\n\n import tempfile\n import zipfile\n with tempfile.TemporaryDirectory() as tmpdir:\n with zipfile.ZipFile(str(p), 'w', zipfile.ZIP_DEFLATED, allowZip64=True, compresslevel=9) as zfp:\n for vs in Progress(vsheets):\n filetype = vs.options.save_filetype\n tmpp = Path(f'{tmpdir}{vs.name}.{filetype}')\n savefunc = getattr(vs, 'save_' + filetype, None) or getattr(vd, 'save_' + filetype, None)\n savefunc(tmpp, vs)\n zfp.write(tmpp, f'{vs.name}.{vs.options.save_filetype}')\n\n\[email protected]\ndef save_txt(vd, p, *vsheets):\n if len(vsheets) == 1 and vsheets[0].nVisibleCols > 1: #2173\n return vd.save_tsv(p, vsheets[0])\n\n with p.open(mode='w', encoding=vsheets[0].options.save_encoding) as fp:\n for vs in vsheets:\n unitsep = vs.options.delimiter\n rowsep = vs.options.row_delimiter\n for dispvals in vs.iterdispvals(*vs.visibleCols, format=True):\n fp.write(unitsep.join(dispvals.values()))\n fp.write(rowsep)\n\n\[email protected]\ndef rootSheet(sheet):\n r = sheet\n while isinstance(r.source, BaseSheet):\n r = r.source\n\n return r\n\n\nBaseSheet.addCommand('^S', 'save-sheet', 'vd.saveSheets(inputPath(\"save to: \", value=getDefaultSaveName()), sheet)', 'save current sheet to filename in format determined by extension (default .tsv)')\nBaseSheet.addCommand('', 'save-sheet-really', 'vd.saveSheets(Path(getDefaultSaveName()), sheet, confirm_overwrite=False)', 'save current sheet without asking for filename or confirmation')\nBaseSheet.addCommand('', 'save-source', 'vd.saveSheets(rootSheet().source, rootSheet())', 'save root sheet to its source')\nBaseSheet.addCommand('g^S', 'save-all', 'vd.saveSheets(inputPath(\"save all sheets to: \"), *vd.stackedSheets)', 'save all sheets to given file or directory)')\nIndexSheet.addCommand('g^S', 'save-selected', 'vd.saveSheets(inputPath(\"save %d sheets to: \" % nSelectedRows, value=\"_\".join(getattr(vs, \"name\", None) or \"blank\" for vs in selectedRows)), *selectedRows)', 'save all selected sheets to given file or directory')\nSheet.addCommand('', 'save-col', 'save_cols([cursorCol])', 'save current column only to filename in format determined by extension (default .tsv)')\nSheet.addCommand('', 'save-col-keys', 'save_cols(keyCols + [cursorCol])', 'save key columns and current column to filename in format determined by extension (default .tsv)')\n\nvd.addMenuItems('''\n File > Save > current sheet > save-sheet\n File > Save > all sheets > save-all\n File > Save > current column > save-col\n File > Save > keys and current column > save-col-keys\n''')\n", "path": "visidata/save.py"}], "after_files": [{"content": "import collections\nimport os\nfrom copy import copy\n\nfrom visidata import vd\nfrom visidata import Sheet, BaseSheet, VisiData, IndexSheet, Path, Progress, TypedExceptionWrapper\n\nvd.option('safe_error', '#ERR', 'error string to use while saving', replay=True)\nvd.option('save_encoding', 'utf-8', 'encoding passed to codecs.open when saving a file', replay=True, help=vd.help_encoding)\n\[email protected]\ndef safe_trdict(vs):\n 'returns string.translate dictionary for replacing tabs and newlines'\n if vs.options.safety_first:\n delim = vs.options.delimiter\n return {\n 0: '', # strip NUL completely\n ord(delim): vs.options.tsv_safe_tab, # \\t\n 10: vs.options.tsv_safe_newline, # \\n\n 13: vs.options.tsv_safe_newline, # \\r\n }\n return {}\n\n\[email protected]\ndef iterdispvals(sheet, *cols, format=False):\n 'For each row in sheet, yield OrderedDict of values for given cols. Values are typed if format=False, or a formatted display string if format=True.'\n if not cols:\n cols = sheet.visibleCols\n\n transformers = collections.OrderedDict() # list of transformers for each column in order\n for col in cols:\n transformers[col] = [ col.type ]\n if format:\n formatMaker = getattr(col, 'formatter_'+(col.formatter or sheet.options.disp_formatter))\n transformers[col].append(formatMaker(col._formatdict))\n trdict = sheet.safe_trdict()\n if trdict:\n transformers[col].append(lambda v,trdict=trdict: v.translate(trdict))\n\n options_safe_error = sheet.options.safe_error\n for r in Progress(sheet.rows):\n dispvals = collections.OrderedDict() # [col] -> value\n for col, transforms in transformers.items():\n try:\n dispval = col.getValue(r)\n\n except Exception as e:\n vd.exceptionCaught(e)\n dispval = options_safe_error or str(e)\n\n try:\n for t in transforms:\n if dispval is None:\n break\n elif isinstance(dispval, TypedExceptionWrapper):\n dispval = options_safe_error or str(dispval)\n break\n else:\n dispval = t(dispval)\n\n if dispval is None and format:\n dispval = ''\n except Exception as e:\n dispval = str(dispval)\n\n dispvals[col] = dispval\n\n yield dispvals\n\n\[email protected]\ndef itervals(sheet, *cols, format=False):\n for row in sheet.iterdispvals(*cols, format=format):\n yield [row[c] for c in cols]\n\[email protected]\ndef getDefaultSaveName(sheet):\n src = getattr(sheet, 'source', None)\n if hasattr(src, 'scheme') and src.scheme:\n return src.name + src.suffix\n if isinstance(src, Path):\n if sheet.options.is_set('save_filetype', sheet):\n # if save_filetype is over-ridden from default, use it as the extension\n return str(src.with_suffix('')) + '.' + sheet.options.save_filetype\n return str(src)\n else:\n return sheet.name+'.'+getattr(sheet, 'filetype', sheet.options.save_filetype)\n\n\[email protected]\ndef save_cols(vd, cols):\n sheet = cols[0].sheet\n vs = copy(sheet)\n vs.columns = list(cols)\n vs.rows = sheet.rows\n if len(cols) == 1:\n savedcoltxt = cols[0].name + ' column'\n else:\n savedcoltxt = '%s columns' % len(cols)\n path = vd.inputPath('save %s to: ' % savedcoltxt, value=vs.getDefaultSaveName())\n vd.saveSheets(path, vs)\n\n\[email protected]\ndef saveSheets(vd, givenpath, *vsheets, confirm_overwrite=True):\n 'Save all *vsheets* to *givenpath*.'\n\n if not vsheets: # blank tuple\n vd.warning('no sheets to save')\n return\n\n filetypes = [givenpath.ext.lower(), vd.options.save_filetype.lower()]\n\n vd.clearCaches()\n\n for ft in filetypes:\n savefunc = getattr(vsheets[0], 'save_' + ft, None) or getattr(vd, 'save_' + ft, None)\n if savefunc:\n filetype = ft\n break\n\n if savefunc is None:\n vd.fail(f'no function to save as {filetype}')\n\n if confirm_overwrite:\n vd.confirmOverwrite(givenpath)\n\n vd.status('saving %s sheets to %s as %s' % (len(vsheets), givenpath.given, filetype))\n\n if not givenpath.given.endswith('/'): # forcibly specify save individual files into directory by ending path with /\n for vs in vsheets:\n vs.hasBeenModified = False\n # savefuncs(vd, p, *vsheets) will have 2 argcount (*vsheets does not get counted as an arg)\n # savefuncs(vd, p, vs) will have 3 argcount (vs counts as an arg, along with vd, path)\n if savefunc.__code__.co_argcount == 3 and len(vsheets) > 1:\n vd.fail(f'cannot save multiple {filetype} sheets to non-dir')\n return vd.execAsync(savefunc, givenpath, *vsheets)\n\n # path is a dir\n\n # save as individual files in the givenpath directory\n try:\n os.makedirs(givenpath, exist_ok=True)\n except FileExistsError:\n pass\n\n if not givenpath.is_dir():\n vd.fail(f'cannot save multiple {filetype} sheets to non-dir')\n\n def _savefiles(vsheets, givenpath, savefunc, filetype):\n for vs in vsheets:\n p = Path((givenpath / vs.name).with_suffix('.'+filetype))\n savefunc(p, vs)\n vs.hasBeenModified = False\n\n vd.status(f'{givenpath} save finished') #2157\n\n return vd.execAsync(_savefiles, vsheets, givenpath, savefunc, filetype)\n\n\[email protected]\ndef save_zip(vd, p, *vsheets):\n vd.clearCaches()\n\n import tempfile\n import zipfile\n with tempfile.TemporaryDirectory() as tmpdir:\n with zipfile.ZipFile(str(p), 'w', zipfile.ZIP_DEFLATED, allowZip64=True, compresslevel=9) as zfp:\n for vs in Progress(vsheets):\n filetype = vs.options.save_filetype\n tmpp = Path(f'{tmpdir}{vs.name}.{filetype}')\n savefunc = getattr(vs, 'save_' + filetype, None) or getattr(vd, 'save_' + filetype, None)\n savefunc(tmpp, vs)\n zfp.write(tmpp, f'{vs.name}.{vs.options.save_filetype}')\n\n\[email protected]\ndef save_txt(vd, p, *vsheets):\n if len(vsheets) == 1 and vsheets[0].nVisibleCols > 1: #2173\n return vd.save_tsv(p, vsheets[0])\n\n with p.open(mode='w', encoding=vsheets[0].options.save_encoding) as fp:\n for vs in vsheets:\n unitsep = vs.options.delimiter\n rowsep = vs.options.row_delimiter\n for dispvals in vs.iterdispvals(*vs.visibleCols, format=True):\n fp.write(unitsep.join(dispvals.values()))\n fp.write(rowsep)\n\n\[email protected]\ndef rootSheet(sheet):\n r = sheet\n while isinstance(r.source, BaseSheet):\n r = r.source\n\n return r\n\n\nBaseSheet.addCommand('^S', 'save-sheet', 'vd.saveSheets(inputPath(\"save to: \", value=getDefaultSaveName()), sheet)', 'save current sheet to filename in format determined by extension (default .tsv)')\nBaseSheet.addCommand('', 'save-sheet-really', 'vd.saveSheets(Path(getDefaultSaveName()), sheet, confirm_overwrite=False)', 'save current sheet without asking for filename or confirmation')\nBaseSheet.addCommand('', 'save-source', 'vd.saveSheets(rootSheet().source, rootSheet())', 'save root sheet to its source')\nBaseSheet.addCommand('g^S', 'save-all', 'vd.saveSheets(inputPath(\"save all sheets to: \"), *vd.stackedSheets)', 'save all sheets to given file or directory)')\nIndexSheet.addCommand('g^S', 'save-selected', 'vd.saveSheets(inputPath(\"save %d sheets to: \" % nSelectedRows, value=\"_\".join(getattr(vs, \"name\", None) or \"blank\" for vs in selectedRows)), *selectedRows)', 'save all selected sheets to given file or directory')\nSheet.addCommand('', 'save-col', 'save_cols([cursorCol])', 'save current column only to filename in format determined by extension (default .tsv)')\nSheet.addCommand('', 'save-col-keys', 'save_cols(keyCols + [cursorCol])', 'save key columns and current column to filename in format determined by extension (default .tsv)')\n\nvd.addMenuItems('''\n File > Save > current sheet > save-sheet\n File > Save > all sheets > save-all\n File > Save > current column > save-col\n File > Save > keys and current column > save-col-keys\n''')\n", "path": "visidata/save.py"}]}
3,350
99
gh_patches_debug_7827
rasdani/github-patches
git_diff
lightly-ai__lightly-455
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 447 register dataset before upload pw # 447 register dataset before upload Closes #447. - Adds the generated api code to register dataset uploads and uses them. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lightly/cli/upload_cli.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 """**Lightly Upload:** Upload images to the Lightly platform. 3 4 This module contains the entrypoint for the **lightly-upload** 5 command-line interface. 6 """ 7 8 # Copyright (c) 2020. Lightly AG and its affiliates. 9 # All Rights Reserved 10 import json 11 12 import hydra 13 14 import torchvision 15 from torch.utils.hipify.hipify_python import bcolors 16 17 from lightly.cli._helpers import fix_input_path, print_as_warning, cpu_count 18 19 from lightly.api.utils import getenv 20 from lightly.api.api_workflow_client import ApiWorkflowClient 21 from lightly.data import LightlyDataset 22 23 24 def _upload_cli(cfg, is_cli_call=True): 25 input_dir = cfg['input_dir'] 26 if input_dir and is_cli_call: 27 input_dir = fix_input_path(input_dir) 28 29 path_to_embeddings = cfg['embeddings'] 30 if path_to_embeddings and is_cli_call: 31 path_to_embeddings = fix_input_path(path_to_embeddings) 32 33 dataset_id = cfg['dataset_id'] 34 token = cfg['token'] 35 new_dataset_name = cfg['new_dataset_name'] 36 37 cli_api_args_wrong = False 38 if not token: 39 print_as_warning('Please specify your access token.') 40 cli_api_args_wrong = True 41 42 dataset_id_ok = dataset_id and len(dataset_id) > 0 43 new_dataset_name_ok = new_dataset_name and len(new_dataset_name) > 0 44 if new_dataset_name_ok and not dataset_id_ok: 45 api_workflow_client = ApiWorkflowClient(token=token) 46 api_workflow_client.create_dataset(dataset_name=new_dataset_name) 47 elif dataset_id_ok and not new_dataset_name_ok: 48 api_workflow_client = ApiWorkflowClient(token=token, dataset_id=dataset_id) 49 else: 50 print_as_warning('Please specify either the dataset_id of an existing dataset or a new_dataset_name.') 51 cli_api_args_wrong = True 52 53 if cli_api_args_wrong: 54 print_as_warning('For help, try: lightly-upload --help') 55 return 56 57 # potentially load custom metadata 58 custom_metadata = None 59 if cfg['custom_metadata']: 60 path_to_custom_metadata = fix_input_path(cfg['custom_metadata']) 61 print(f'Loading custom metadata from {bcolors.OKBLUE}{path_to_custom_metadata}{bcolors.ENDC}') 62 with open(path_to_custom_metadata, 'r') as f: 63 custom_metadata = json.load(f) 64 65 # determine the number of available cores 66 if cfg['loader']['num_workers'] < 0: 67 cfg['loader']['num_workers'] = cpu_count() 68 69 size = cfg['resize'] 70 if not isinstance(size, int): 71 size = tuple(size) 72 transform = None 73 if isinstance(size, tuple) or size > 0: 74 transform = torchvision.transforms.Resize(size) 75 76 if input_dir: 77 mode = cfg['upload'] 78 dataset = LightlyDataset(input_dir=input_dir, transform=transform) 79 api_workflow_client.upload_dataset( 80 input=dataset, 81 mode=mode, 82 max_workers=cfg['loader']['num_workers'], 83 custom_metadata=custom_metadata, 84 ) 85 print(f"Finished the upload of the dataset.") 86 87 if path_to_embeddings: 88 name = cfg['embedding_name'] 89 print("Starting upload of embeddings.") 90 api_workflow_client.upload_embeddings( 91 path_to_embeddings_csv=path_to_embeddings, name=name 92 ) 93 print("Finished upload of embeddings.") 94 95 if new_dataset_name_ok: 96 print(f'The dataset_id of the newly created dataset is ' 97 f'{bcolors.OKBLUE}{api_workflow_client.dataset_id}{bcolors.ENDC}') 98 99 100 @hydra.main(config_path='config', config_name='config') 101 def upload_cli(cfg): 102 """Upload images/embeddings from the command-line to the Lightly platform. 103 104 Args: 105 cfg: 106 The default configs are loaded from the config file. 107 To overwrite them please see the section on the config file 108 (.config.config.yaml). 109 110 Command-Line Args: 111 input_dir: 112 Path to the input directory where images are stored. 113 embeddings: 114 Path to the csv file storing the embeddings generated by 115 lightly. 116 token: 117 User access token to the Lightly platform. If needs to be 118 specified to upload the images and embeddings to the platform. 119 dataset_id: 120 Identifier of the dataset on the Lightly platform. 121 Either the dataset_id or the new_dataset_name need to be 122 specified. 123 new_dataset_name: 124 The name of the new dataset to create on the Lightly platform. 125 Either the dataset_id or the new_dataset_name need to be 126 specified. 127 upload: 128 String to determine whether to upload the full images, 129 thumbnails only, or metadata only. 130 131 Must be one of ['full', 'thumbnails', 'metadata'] 132 embedding_name: 133 Assign the embedding a name in order to identify it on the 134 Lightly platform. 135 resize: 136 Desired size of the uploaded images. If negative, default size is used. 137 If size is a sequence like (h, w), output size will be matched to 138 this. If size is an int, smaller edge of the image will be matched 139 to this number. i.e, if height > width, then image will be rescaled 140 to (size * height / width, size). 141 142 Examples: 143 >>> # create a new dataset on the Lightly platform and upload thumbnails to it 144 >>> lightly-upload input_dir=data/ token='123' new_dataset_name='new_dataset_name_xyz' 145 >>> 146 >>> # upload thumbnails to the Lightly platform to an existing dataset 147 >>> lightly-upload input_dir=data/ token='123' dataset_id='XYZ' 148 >>> 149 >>> # create a new dataset on the Lightly platform and upload full images to it 150 >>> lightly-upload input_dir=data/ token='123' new_dataset_name='new_dataset_name_xyz' upload='full' 151 >>> 152 >>> # upload metadata to the Lightly platform 153 >>> lightly-upload input_dir=data/ token='123' dataset_id='XYZ' upload='metadata' 154 >>> 155 >>> # upload embeddings to the Lightly platform (must have uploaded images beforehand) 156 >>> lightly-upload embeddings=embeddings.csv token='123' dataset_id='XYZ' 157 >>> 158 >>> # upload both, images and embeddings in a single command 159 >>> lightly-upload input_dir=data/ embeddings=embeddings.csv upload='full' \\ 160 >>> token='123' dataset_id='XYZ' 161 >>> # create a new dataset on the Lightly platform and upload both, images and embeddings 162 >>> lightly-upload input_dir=data/ embeddings=embeddings.csv upload='full' \\ 163 >>> token='123' new_dataset_name='new_dataset_name_xyz' 164 165 """ 166 _upload_cli(cfg) 167 168 169 def entry(): 170 upload_cli() 171 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lightly/cli/upload_cli.py b/lightly/cli/upload_cli.py --- a/lightly/cli/upload_cli.py +++ b/lightly/cli/upload_cli.py @@ -92,6 +92,10 @@ ) print("Finished upload of embeddings.") + if custom_metadata is not None and not input_dir: + # upload custom metadata separately + api_workflow_client.upload_custom_metadata(custom_metadata, verbose=True) + if new_dataset_name_ok: print(f'The dataset_id of the newly created dataset is ' f'{bcolors.OKBLUE}{api_workflow_client.dataset_id}{bcolors.ENDC}')
{"golden_diff": "diff --git a/lightly/cli/upload_cli.py b/lightly/cli/upload_cli.py\n--- a/lightly/cli/upload_cli.py\n+++ b/lightly/cli/upload_cli.py\n@@ -92,6 +92,10 @@\n )\n print(\"Finished upload of embeddings.\")\n \n+ if custom_metadata is not None and not input_dir:\n+ # upload custom metadata separately\n+ api_workflow_client.upload_custom_metadata(custom_metadata, verbose=True)\n+\n if new_dataset_name_ok:\n print(f'The dataset_id of the newly created dataset is '\n f'{bcolors.OKBLUE}{api_workflow_client.dataset_id}{bcolors.ENDC}')\n", "issue": "447 register dataset before upload pw\n# 447 register dataset before upload\r\n\r\nCloses #447.\r\n\r\n- Adds the generated api code to register dataset uploads and uses them.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"**Lightly Upload:** Upload images to the Lightly platform.\n\nThis module contains the entrypoint for the **lightly-upload**\ncommand-line interface.\n\"\"\"\n\n# Copyright (c) 2020. Lightly AG and its affiliates.\n# All Rights Reserved\nimport json\n\nimport hydra\n\nimport torchvision\nfrom torch.utils.hipify.hipify_python import bcolors\n\nfrom lightly.cli._helpers import fix_input_path, print_as_warning, cpu_count\n\nfrom lightly.api.utils import getenv\nfrom lightly.api.api_workflow_client import ApiWorkflowClient\nfrom lightly.data import LightlyDataset\n\n\ndef _upload_cli(cfg, is_cli_call=True):\n input_dir = cfg['input_dir']\n if input_dir and is_cli_call:\n input_dir = fix_input_path(input_dir)\n\n path_to_embeddings = cfg['embeddings']\n if path_to_embeddings and is_cli_call:\n path_to_embeddings = fix_input_path(path_to_embeddings)\n\n dataset_id = cfg['dataset_id']\n token = cfg['token']\n new_dataset_name = cfg['new_dataset_name']\n\n cli_api_args_wrong = False\n if not token:\n print_as_warning('Please specify your access token.')\n cli_api_args_wrong = True\n\n dataset_id_ok = dataset_id and len(dataset_id) > 0\n new_dataset_name_ok = new_dataset_name and len(new_dataset_name) > 0\n if new_dataset_name_ok and not dataset_id_ok:\n api_workflow_client = ApiWorkflowClient(token=token)\n api_workflow_client.create_dataset(dataset_name=new_dataset_name)\n elif dataset_id_ok and not new_dataset_name_ok:\n api_workflow_client = ApiWorkflowClient(token=token, dataset_id=dataset_id)\n else:\n print_as_warning('Please specify either the dataset_id of an existing dataset or a new_dataset_name.')\n cli_api_args_wrong = True\n\n if cli_api_args_wrong:\n print_as_warning('For help, try: lightly-upload --help')\n return\n\n # potentially load custom metadata\n custom_metadata = None\n if cfg['custom_metadata']:\n path_to_custom_metadata = fix_input_path(cfg['custom_metadata'])\n print(f'Loading custom metadata from {bcolors.OKBLUE}{path_to_custom_metadata}{bcolors.ENDC}')\n with open(path_to_custom_metadata, 'r') as f:\n custom_metadata = json.load(f)\n\n # determine the number of available cores\n if cfg['loader']['num_workers'] < 0:\n cfg['loader']['num_workers'] = cpu_count()\n\n size = cfg['resize']\n if not isinstance(size, int):\n size = tuple(size)\n transform = None\n if isinstance(size, tuple) or size > 0:\n transform = torchvision.transforms.Resize(size)\n\n if input_dir:\n mode = cfg['upload']\n dataset = LightlyDataset(input_dir=input_dir, transform=transform)\n api_workflow_client.upload_dataset(\n input=dataset,\n mode=mode,\n max_workers=cfg['loader']['num_workers'],\n custom_metadata=custom_metadata,\n )\n print(f\"Finished the upload of the dataset.\")\n\n if path_to_embeddings:\n name = cfg['embedding_name']\n print(\"Starting upload of embeddings.\")\n api_workflow_client.upload_embeddings(\n path_to_embeddings_csv=path_to_embeddings, name=name\n )\n print(\"Finished upload of embeddings.\")\n\n if new_dataset_name_ok:\n print(f'The dataset_id of the newly created dataset is '\n f'{bcolors.OKBLUE}{api_workflow_client.dataset_id}{bcolors.ENDC}')\n\n\[email protected](config_path='config', config_name='config')\ndef upload_cli(cfg):\n \"\"\"Upload images/embeddings from the command-line to the Lightly platform.\n\n Args:\n cfg:\n The default configs are loaded from the config file.\n To overwrite them please see the section on the config file \n (.config.config.yaml).\n \n Command-Line Args:\n input_dir:\n Path to the input directory where images are stored.\n embeddings:\n Path to the csv file storing the embeddings generated by\n lightly.\n token:\n User access token to the Lightly platform. If needs to be\n specified to upload the images and embeddings to the platform.\n dataset_id:\n Identifier of the dataset on the Lightly platform.\n Either the dataset_id or the new_dataset_name need to be\n specified.\n new_dataset_name:\n The name of the new dataset to create on the Lightly platform.\n Either the dataset_id or the new_dataset_name need to be\n specified.\n upload:\n String to determine whether to upload the full images, \n thumbnails only, or metadata only.\n\n Must be one of ['full', 'thumbnails', 'metadata']\n embedding_name:\n Assign the embedding a name in order to identify it on the \n Lightly platform.\n resize:\n Desired size of the uploaded images. If negative, default size is used.\n If size is a sequence like (h, w), output size will be matched to \n this. If size is an int, smaller edge of the image will be matched \n to this number. i.e, if height > width, then image will be rescaled\n to (size * height / width, size).\n\n Examples:\n >>> # create a new dataset on the Lightly platform and upload thumbnails to it\n >>> lightly-upload input_dir=data/ token='123' new_dataset_name='new_dataset_name_xyz'\n >>>\n >>> # upload thumbnails to the Lightly platform to an existing dataset\n >>> lightly-upload input_dir=data/ token='123' dataset_id='XYZ'\n >>> \n >>> # create a new dataset on the Lightly platform and upload full images to it\n >>> lightly-upload input_dir=data/ token='123' new_dataset_name='new_dataset_name_xyz' upload='full'\n >>>\n >>> # upload metadata to the Lightly platform\n >>> lightly-upload input_dir=data/ token='123' dataset_id='XYZ' upload='metadata'\n >>>\n >>> # upload embeddings to the Lightly platform (must have uploaded images beforehand)\n >>> lightly-upload embeddings=embeddings.csv token='123' dataset_id='XYZ'\n >>>\n >>> # upload both, images and embeddings in a single command\n >>> lightly-upload input_dir=data/ embeddings=embeddings.csv upload='full' \\\\\n >>> token='123' dataset_id='XYZ'\n >>> # create a new dataset on the Lightly platform and upload both, images and embeddings\n >>> lightly-upload input_dir=data/ embeddings=embeddings.csv upload='full' \\\\\n >>> token='123' new_dataset_name='new_dataset_name_xyz'\n\n \"\"\"\n _upload_cli(cfg)\n\n\ndef entry():\n upload_cli()\n", "path": "lightly/cli/upload_cli.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"**Lightly Upload:** Upload images to the Lightly platform.\n\nThis module contains the entrypoint for the **lightly-upload**\ncommand-line interface.\n\"\"\"\n\n# Copyright (c) 2020. Lightly AG and its affiliates.\n# All Rights Reserved\nimport json\n\nimport hydra\n\nimport torchvision\nfrom torch.utils.hipify.hipify_python import bcolors\n\nfrom lightly.cli._helpers import fix_input_path, print_as_warning, cpu_count\n\nfrom lightly.api.utils import getenv\nfrom lightly.api.api_workflow_client import ApiWorkflowClient\nfrom lightly.data import LightlyDataset\n\n\ndef _upload_cli(cfg, is_cli_call=True):\n input_dir = cfg['input_dir']\n if input_dir and is_cli_call:\n input_dir = fix_input_path(input_dir)\n\n path_to_embeddings = cfg['embeddings']\n if path_to_embeddings and is_cli_call:\n path_to_embeddings = fix_input_path(path_to_embeddings)\n\n dataset_id = cfg['dataset_id']\n token = cfg['token']\n new_dataset_name = cfg['new_dataset_name']\n\n cli_api_args_wrong = False\n if not token:\n print_as_warning('Please specify your access token.')\n cli_api_args_wrong = True\n\n dataset_id_ok = dataset_id and len(dataset_id) > 0\n new_dataset_name_ok = new_dataset_name and len(new_dataset_name) > 0\n if new_dataset_name_ok and not dataset_id_ok:\n api_workflow_client = ApiWorkflowClient(token=token)\n api_workflow_client.create_dataset(dataset_name=new_dataset_name)\n elif dataset_id_ok and not new_dataset_name_ok:\n api_workflow_client = ApiWorkflowClient(token=token, dataset_id=dataset_id)\n else:\n print_as_warning('Please specify either the dataset_id of an existing dataset or a new_dataset_name.')\n cli_api_args_wrong = True\n\n if cli_api_args_wrong:\n print_as_warning('For help, try: lightly-upload --help')\n return\n\n # potentially load custom metadata\n custom_metadata = None\n if cfg['custom_metadata']:\n path_to_custom_metadata = fix_input_path(cfg['custom_metadata'])\n print(f'Loading custom metadata from {bcolors.OKBLUE}{path_to_custom_metadata}{bcolors.ENDC}')\n with open(path_to_custom_metadata, 'r') as f:\n custom_metadata = json.load(f)\n\n # determine the number of available cores\n if cfg['loader']['num_workers'] < 0:\n cfg['loader']['num_workers'] = cpu_count()\n\n size = cfg['resize']\n if not isinstance(size, int):\n size = tuple(size)\n transform = None\n if isinstance(size, tuple) or size > 0:\n transform = torchvision.transforms.Resize(size)\n\n if input_dir:\n mode = cfg['upload']\n dataset = LightlyDataset(input_dir=input_dir, transform=transform)\n api_workflow_client.upload_dataset(\n input=dataset,\n mode=mode,\n max_workers=cfg['loader']['num_workers'],\n custom_metadata=custom_metadata,\n )\n print(f\"Finished the upload of the dataset.\")\n\n if path_to_embeddings:\n name = cfg['embedding_name']\n print(\"Starting upload of embeddings.\")\n api_workflow_client.upload_embeddings(\n path_to_embeddings_csv=path_to_embeddings, name=name\n )\n print(\"Finished upload of embeddings.\")\n\n if custom_metadata is not None and not input_dir:\n # upload custom metadata separately\n api_workflow_client.upload_custom_metadata(custom_metadata, verbose=True)\n\n if new_dataset_name_ok:\n print(f'The dataset_id of the newly created dataset is '\n f'{bcolors.OKBLUE}{api_workflow_client.dataset_id}{bcolors.ENDC}')\n\n\[email protected](config_path='config', config_name='config')\ndef upload_cli(cfg):\n \"\"\"Upload images/embeddings from the command-line to the Lightly platform.\n\n Args:\n cfg:\n The default configs are loaded from the config file.\n To overwrite them please see the section on the config file \n (.config.config.yaml).\n \n Command-Line Args:\n input_dir:\n Path to the input directory where images are stored.\n embeddings:\n Path to the csv file storing the embeddings generated by\n lightly.\n token:\n User access token to the Lightly platform. If needs to be\n specified to upload the images and embeddings to the platform.\n dataset_id:\n Identifier of the dataset on the Lightly platform.\n Either the dataset_id or the new_dataset_name need to be\n specified.\n new_dataset_name:\n The name of the new dataset to create on the Lightly platform.\n Either the dataset_id or the new_dataset_name need to be\n specified.\n upload:\n String to determine whether to upload the full images, \n thumbnails only, or metadata only.\n\n Must be one of ['full', 'thumbnails', 'metadata']\n embedding_name:\n Assign the embedding a name in order to identify it on the \n Lightly platform.\n resize:\n Desired size of the uploaded images. If negative, default size is used.\n If size is a sequence like (h, w), output size will be matched to \n this. If size is an int, smaller edge of the image will be matched \n to this number. i.e, if height > width, then image will be rescaled\n to (size * height / width, size).\n\n Examples:\n >>> # create a new dataset on the Lightly platform and upload thumbnails to it\n >>> lightly-upload input_dir=data/ token='123' new_dataset_name='new_dataset_name_xyz'\n >>>\n >>> # upload thumbnails to the Lightly platform to an existing dataset\n >>> lightly-upload input_dir=data/ token='123' dataset_id='XYZ'\n >>> \n >>> # create a new dataset on the Lightly platform and upload full images to it\n >>> lightly-upload input_dir=data/ token='123' new_dataset_name='new_dataset_name_xyz' upload='full'\n >>>\n >>> # upload metadata to the Lightly platform\n >>> lightly-upload input_dir=data/ token='123' dataset_id='XYZ' upload='metadata'\n >>>\n >>> # upload embeddings to the Lightly platform (must have uploaded images beforehand)\n >>> lightly-upload embeddings=embeddings.csv token='123' dataset_id='XYZ'\n >>>\n >>> # upload both, images and embeddings in a single command\n >>> lightly-upload input_dir=data/ embeddings=embeddings.csv upload='full' \\\\\n >>> token='123' dataset_id='XYZ'\n >>> # create a new dataset on the Lightly platform and upload both, images and embeddings\n >>> lightly-upload input_dir=data/ embeddings=embeddings.csv upload='full' \\\\\n >>> token='123' new_dataset_name='new_dataset_name_xyz'\n\n \"\"\"\n _upload_cli(cfg)\n\n\ndef entry():\n upload_cli()\n", "path": "lightly/cli/upload_cli.py"}]}
2,158
136
gh_patches_debug_17904
rasdani/github-patches
git_diff
sopel-irc__sopel-983
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [currency] Currency codes are needlessly case-sensitive There's no reason that the currency codes should have to be capitalized by the user, is there? ``` [06:03:23] <dgw> ;cur 1 USD in CAD [06:03:24] <Sopel> 1.0 USD (U.S. dollar) = 1.386001386 CAD (Canadian Dollar) [06:03:28] <dgw> ;cur 1 usd in CAD [06:03:29] <Sopel> 1.0 usd (U.S. dollar) = 1.386001386 CAD (Canadian Dollar) [06:03:33] <dgw> ;cur 1 USD in cad [06:03:34] <Sopel> dgw: Unknown currency: cad ``` If no reason not to do so arises, I'll PR this one myself in a couple days. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sopel/modules/currency.py` Content: ``` 1 # coding=utf-8 2 # Copyright 2013 Elsie Powell, embolalia.com 3 # Licensed under the Eiffel Forum License 2 4 from __future__ import unicode_literals, absolute_import, print_function, division 5 6 import json 7 import xmltodict 8 import re 9 10 from sopel import web 11 from sopel.module import commands, example, NOLIMIT 12 13 # The Canadian central bank has better exchange rate data than the Fed, the 14 # Bank of England, or the European Central Bank. Who knew? 15 base_url = 'http://www.bankofcanada.ca/stats/assets/rates_rss/noon/en_{}.xml' 16 regex = re.compile(r''' 17 (\d+(?:\.\d+)?) # Decimal number 18 \s*([a-zA-Z]{3}) # 3-letter currency code 19 \s+(?:in|as|of|to)\s+ # preposition 20 ([a-zA-Z]{3}) # 3-letter currency code 21 ''', re.VERBOSE) 22 23 24 def get_rate(code): 25 if code.upper() == 'CAD': 26 return 1, 'Canadian Dollar' 27 elif code.upper() == 'BTC': 28 rates = json.loads(web.get('https://api.bitcoinaverage.com/ticker/all')) 29 return 1 / rates['CAD']['24h_avg'], 'Bitcoin—24hr average' 30 31 data, headers = web.get(base_url.format(code), dont_decode=True, return_headers=True) 32 if headers['_http_status'] == 404: 33 return False, False 34 namespaces = { 35 'http://www.cbwiki.net/wiki/index.php/Specification_1.1': 'cb', 36 'http://purl.org/rss/1.0/': None, 37 'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf' } 38 xml = xmltodict.parse(data, process_namespaces=True, namespaces=namespaces).get('rdf:RDF') 39 namestring = xml.get('channel').get('title').get('#text') 40 name = namestring[len('Bank of Canada noon rate: '):] 41 name = re.sub(r'\s*\(noon\)\s*', '', name) 42 rate = xml.get('item').get('cb:statistics').get('cb:exchangeRate').get('cb:value').get('#text') 43 return float(rate), name 44 45 46 @commands('cur', 'currency', 'exchange') 47 @example('.cur 20 EUR in USD') 48 def exchange(bot, trigger): 49 """Show the exchange rate between two currencies""" 50 if not trigger.group(2): 51 return bot.reply("No search term. An example: .cur 20 EUR in USD") 52 match = regex.match(trigger.group(2)) 53 if not match: 54 # It's apologetic, because it's using Canadian data. 55 bot.reply("Sorry, I didn't understand the input.") 56 return NOLIMIT 57 58 amount, of, to = match.groups() 59 try: 60 amount = float(amount) 61 except: 62 bot.reply("Sorry, I didn't understand the input.") 63 display(bot, amount, of, to) 64 65 66 def display(bot, amount, of, to): 67 if not amount: 68 bot.reply("Zero is zero, no matter what country you're in.") 69 try: 70 of_rate, of_name = get_rate(of) 71 if not of_name: 72 bot.reply("Unknown currency: %s" % of) 73 return 74 to_rate, to_name = get_rate(to) 75 if not to_name: 76 bot.reply("Unknown currency: %s" % to) 77 return 78 except Exception as e: 79 raise 80 bot.reply("Something went wrong while I was getting the exchange rate.") 81 return NOLIMIT 82 83 result = amount / of_rate * to_rate 84 bot.say("{} {} ({}) = {} {} ({})".format(amount, of, of_name, 85 result, to, to_name)) 86 87 88 @commands('btc', 'bitcoin') 89 @example('.btc 20 EUR') 90 def bitcoin(bot, trigger): 91 #if 2 args, 1st is number and 2nd is currency. If 1 arg, it's either the number or the currency. 92 to = trigger.group(4) 93 amount = trigger.group(3) 94 if not to: 95 to = trigger.group(3) or 'USD' 96 amount = 1 97 98 try: 99 amount = float(amount) 100 except: 101 bot.reply("Sorry, I didn't understand the input.") 102 return NOLIMIT 103 104 display(bot, amount, 'BTC', to) 105 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sopel/modules/currency.py b/sopel/modules/currency.py --- a/sopel/modules/currency.py +++ b/sopel/modules/currency.py @@ -26,9 +26,10 @@ def get_rate(code): - if code.upper() == 'CAD': + code = code.upper() + if code == 'CAD': return 1, 'Canadian Dollar' - elif code.upper() == 'BTC': + elif code == 'BTC': rates = json.loads(web.get('https://api.bitcoinaverage.com/ticker/all')) return 1 / rates['CAD']['24h_avg'], 'Bitcoin—24hr average' @@ -85,8 +86,8 @@ return NOLIMIT result = amount / of_rate * to_rate - bot.say("{} {} ({}) = {} {} ({})".format(amount, of, of_name, - result, to, to_name)) + bot.say("{} {} ({}) = {} {} ({})".format(amount, of.upper(), of_name, + result, to.upper(), to_name)) @commands('btc', 'bitcoin')
{"golden_diff": "diff --git a/sopel/modules/currency.py b/sopel/modules/currency.py\n--- a/sopel/modules/currency.py\n+++ b/sopel/modules/currency.py\n@@ -26,9 +26,10 @@\n \n \n def get_rate(code):\n- if code.upper() == 'CAD':\n+ code = code.upper()\n+ if code == 'CAD':\n return 1, 'Canadian Dollar'\n- elif code.upper() == 'BTC':\n+ elif code == 'BTC':\n rates = json.loads(web.get('https://api.bitcoinaverage.com/ticker/all'))\n return 1 / rates['CAD']['24h_avg'], 'Bitcoin\u201424hr average'\n \n@@ -85,8 +86,8 @@\n return NOLIMIT\n \n result = amount / of_rate * to_rate\n- bot.say(\"{} {} ({}) = {} {} ({})\".format(amount, of, of_name,\n- result, to, to_name))\n+ bot.say(\"{} {} ({}) = {} {} ({})\".format(amount, of.upper(), of_name,\n+ result, to.upper(), to_name))\n \n \n @commands('btc', 'bitcoin')\n", "issue": "[currency] Currency codes are needlessly case-sensitive\nThere's no reason that the currency codes should have to be capitalized by the user, is there?\n\n```\n[06:03:23] <dgw> ;cur 1 USD in CAD\n[06:03:24] <Sopel> 1.0 USD (U.S. dollar) = 1.386001386 CAD (Canadian Dollar)\n[06:03:28] <dgw> ;cur 1 usd in CAD\n[06:03:29] <Sopel> 1.0 usd (U.S. dollar) = 1.386001386 CAD (Canadian Dollar)\n[06:03:33] <dgw> ;cur 1 USD in cad\n[06:03:34] <Sopel> dgw: Unknown currency: cad\n```\n\nIf no reason not to do so arises, I'll PR this one myself in a couple days.\n\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2013 Elsie Powell, embolalia.com\n# Licensed under the Eiffel Forum License 2\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport json\nimport xmltodict\nimport re\n\nfrom sopel import web\nfrom sopel.module import commands, example, NOLIMIT\n\n# The Canadian central bank has better exchange rate data than the Fed, the\n# Bank of England, or the European Central Bank. Who knew?\nbase_url = 'http://www.bankofcanada.ca/stats/assets/rates_rss/noon/en_{}.xml'\nregex = re.compile(r'''\n (\\d+(?:\\.\\d+)?) # Decimal number\n \\s*([a-zA-Z]{3}) # 3-letter currency code\n \\s+(?:in|as|of|to)\\s+ # preposition\n ([a-zA-Z]{3}) # 3-letter currency code\n ''', re.VERBOSE)\n\n\ndef get_rate(code):\n if code.upper() == 'CAD':\n return 1, 'Canadian Dollar'\n elif code.upper() == 'BTC':\n rates = json.loads(web.get('https://api.bitcoinaverage.com/ticker/all'))\n return 1 / rates['CAD']['24h_avg'], 'Bitcoin\u201424hr average'\n\n data, headers = web.get(base_url.format(code), dont_decode=True, return_headers=True)\n if headers['_http_status'] == 404:\n return False, False\n namespaces = {\n 'http://www.cbwiki.net/wiki/index.php/Specification_1.1': 'cb',\n 'http://purl.org/rss/1.0/': None,\n 'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf' }\n xml = xmltodict.parse(data, process_namespaces=True, namespaces=namespaces).get('rdf:RDF')\n namestring = xml.get('channel').get('title').get('#text')\n name = namestring[len('Bank of Canada noon rate: '):]\n name = re.sub(r'\\s*\\(noon\\)\\s*', '', name)\n rate = xml.get('item').get('cb:statistics').get('cb:exchangeRate').get('cb:value').get('#text')\n return float(rate), name\n\n\n@commands('cur', 'currency', 'exchange')\n@example('.cur 20 EUR in USD')\ndef exchange(bot, trigger):\n \"\"\"Show the exchange rate between two currencies\"\"\"\n if not trigger.group(2):\n return bot.reply(\"No search term. An example: .cur 20 EUR in USD\")\n match = regex.match(trigger.group(2))\n if not match:\n # It's apologetic, because it's using Canadian data.\n bot.reply(\"Sorry, I didn't understand the input.\")\n return NOLIMIT\n\n amount, of, to = match.groups()\n try:\n amount = float(amount)\n except:\n bot.reply(\"Sorry, I didn't understand the input.\")\n display(bot, amount, of, to)\n\n\ndef display(bot, amount, of, to):\n if not amount:\n bot.reply(\"Zero is zero, no matter what country you're in.\")\n try:\n of_rate, of_name = get_rate(of)\n if not of_name:\n bot.reply(\"Unknown currency: %s\" % of)\n return\n to_rate, to_name = get_rate(to)\n if not to_name:\n bot.reply(\"Unknown currency: %s\" % to)\n return\n except Exception as e:\n raise\n bot.reply(\"Something went wrong while I was getting the exchange rate.\")\n return NOLIMIT\n\n result = amount / of_rate * to_rate\n bot.say(\"{} {} ({}) = {} {} ({})\".format(amount, of, of_name,\n result, to, to_name))\n\n\n@commands('btc', 'bitcoin')\n@example('.btc 20 EUR')\ndef bitcoin(bot, trigger):\n #if 2 args, 1st is number and 2nd is currency. If 1 arg, it's either the number or the currency.\n to = trigger.group(4)\n amount = trigger.group(3)\n if not to:\n to = trigger.group(3) or 'USD'\n amount = 1\n\n try:\n amount = float(amount)\n except:\n bot.reply(\"Sorry, I didn't understand the input.\")\n return NOLIMIT\n\n display(bot, amount, 'BTC', to)\n", "path": "sopel/modules/currency.py"}], "after_files": [{"content": "# coding=utf-8\n\"\"\"currency.py - Sopel Exchange Rate Module\nCopyright 2013 Edward Powell, embolalia.com\nLicensed under the Eiffel Forum License 2\n\nhttp://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport json\nimport xmltodict\nimport re\n\nfrom sopel import web\nfrom sopel.module import commands, example, NOLIMIT\n\n# The Canadian central bank has better exchange rate data than the Fed, the\n# Bank of England, or the European Central Bank. Who knew?\nbase_url = 'http://www.bankofcanada.ca/stats/assets/rates_rss/noon/en_{}.xml'\nregex = re.compile(r'''\n (\\d+(?:\\.\\d+)?) # Decimal number\n \\s*([a-zA-Z]{3}) # 3-letter currency code\n \\s+(?:in|as|of|to)\\s+ # preposition\n ([a-zA-Z]{3}) # 3-letter currency code\n ''', re.VERBOSE)\n\n\ndef get_rate(code):\n code = code.upper()\n if code == 'CAD':\n return 1, 'Canadian Dollar'\n elif code == 'BTC':\n rates = json.loads(web.get('https://api.bitcoinaverage.com/ticker/all'))\n return 1 / rates['CAD']['24h_avg'], 'Bitcoin\u201424hr average'\n\n data, headers = web.get(base_url.format(code), dont_decode=True, return_headers=True)\n if headers['_http_status'] == 404:\n return False, False\n namespaces = {\n 'http://www.cbwiki.net/wiki/index.php/Specification_1.1': 'cb', \n 'http://purl.org/rss/1.0/': None, \n 'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf' }\n xml = xmltodict.parse(data, process_namespaces=True, namespaces=namespaces).get('rdf:RDF')\n namestring = xml.get('channel').get('title').get('#text')\n name = namestring[len('Bank of Canada noon rate: '):]\n name = re.sub(r'\\s*\\(noon\\)\\s*', '', name)\n rate = xml.get('item').get('cb:statistics').get('cb:exchangeRate').get('cb:value').get('#text')\n return float(rate), name\n\n\n@commands('cur', 'currency', 'exchange')\n@example('.cur 20 EUR in USD')\ndef exchange(bot, trigger):\n \"\"\"Show the exchange rate between two currencies\"\"\"\n if not trigger.group(2):\n return bot.reply(\"No search term. An example: .cur 20 EUR in USD\")\n match = regex.match(trigger.group(2))\n if not match:\n # It's apologetic, because it's using Canadian data.\n bot.reply(\"Sorry, I didn't understand the input.\")\n return NOLIMIT\n\n amount, of, to = match.groups()\n try:\n amount = float(amount)\n except:\n bot.reply(\"Sorry, I didn't understand the input.\")\n display(bot, amount, of, to)\n\n\ndef display(bot, amount, of, to):\n if not amount:\n bot.reply(\"Zero is zero, no matter what country you're in.\")\n try:\n of_rate, of_name = get_rate(of)\n if not of_name:\n bot.reply(\"Unknown currency: %s\" % of)\n return\n to_rate, to_name = get_rate(to)\n if not to_name:\n bot.reply(\"Unknown currency: %s\" % to)\n return\n except Exception as e:\n raise\n bot.reply(\"Something went wrong while I was getting the exchange rate.\")\n return NOLIMIT\n\n result = amount / of_rate * to_rate\n bot.say(\"{} {} ({}) = {} {} ({})\".format(amount, of.upper(), of_name,\n result, to.upper(), to_name))\n\n\n@commands('btc', 'bitcoin')\n@example('.btc 20 EUR')\ndef bitcoin(bot, trigger):\n #if 2 args, 1st is number and 2nd is currency. If 1 arg, it's either the number or the currency.\n to = trigger.group(4)\n amount = trigger.group(3)\n if not to:\n to = trigger.group(3) or 'USD'\n amount = 1\n\n try:\n amount = float(amount)\n except:\n bot.reply(\"Sorry, I didn't understand the input.\")\n return NOLIMIT\n\n display(bot, amount, 'BTC', to)\n", "path": "sopel/modules/currency.py"}]}
1,701
255
gh_patches_debug_9215
rasdani/github-patches
git_diff
meltano__meltano-6321
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `meltano --help` fails on Windows in some environments Running any Meltano command in my fresh Windows VM results in the following error: ![image](https://user-images.githubusercontent.com/11428666/176504379-a9489721-09b5-4a2d-ad83-c2f5a37adfb0.png) Sorry for the image of text - I forgot to enable the shared clipboard for the VM. The failure is happening here: ```python freedesktop_data = ( platform.freedesktop_os_release() if safe_hasattr(platform, "freedesktop_os_release") else defaultdict(type(None)) ) ``` This might be because I'm running under git bash instead of Powershell. We had assumed that if `freedesktop_os_release` is present, it will work. This is apparently only sometimes true. To prevent problems for Windows users we should put the call to `platform.freedesktop_os_release` in a try-except block. Doing so will also mean we no longer need to call `hasattr`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/meltano/core/tracking/contexts/environment.py` Content: ``` 1 """Environment context for the Snowplow tracker.""" 2 3 from __future__ import annotations 4 5 import os 6 import platform 7 import uuid 8 from collections import defaultdict 9 from datetime import datetime 10 from pathlib import Path 11 from typing import Any 12 13 import psutil 14 from cached_property import cached_property 15 from snowplow_tracker import SelfDescribingJson 16 from structlog.stdlib import get_logger 17 18 import meltano 19 from meltano.core.tracking.schemas import EnvironmentContextSchema 20 from meltano.core.utils import hash_sha256, safe_hasattr 21 22 logger = get_logger(__name__) 23 24 # This file is only ever created in CI when building a release 25 release_marker_path = Path(__file__).parent / ".release_marker" 26 27 28 class EnvironmentContext(SelfDescribingJson): 29 """Environment context for the Snowplow tracker.""" 30 31 def __init__(self): 32 """Initialize the environment context.""" 33 ci_markers = ("GITHUB_ACTIONS", "CI") 34 super().__init__( 35 EnvironmentContextSchema.url, 36 { 37 "context_uuid": str(uuid.uuid4()), 38 "meltano_version": meltano.__version__, 39 "is_dev_build": not release_marker_path.exists(), 40 "is_ci_environment": any( 41 # True if 'true', 'TRUE', 'True', or '1' 42 os.environ.get(marker, "").lower()[:1] in {"1", "t"} 43 for marker in ci_markers 44 ), 45 "python_version": platform.python_version(), 46 "python_implementation": platform.python_implementation(), 47 **self.system_info, 48 **self.process_info, 49 }, 50 ) 51 52 @cached_property 53 def system_info(self) -> dict[str, Any]: 54 """Get system information. 55 56 Returns: 57 A dictionary containing system information. 58 """ 59 freedesktop_data = ( 60 platform.freedesktop_os_release() 61 if safe_hasattr(platform, "freedesktop_os_release") 62 else defaultdict(type(None)) 63 ) 64 65 return { 66 "system_name": platform.system() or None, 67 "system_release": platform.release() or None, 68 "system_version": platform.version() or None, 69 "machine": platform.machine() or None, 70 "windows_edition": platform.win32_edition() 71 if safe_hasattr(platform, "win32_edition") 72 else None, 73 "freedesktop_id": freedesktop_data["ID"], 74 "freedesktop_id_like": freedesktop_data.get("ID_LIKE", None), 75 "freedesktop_version_id": freedesktop_data.get("VERSION_ID", None), 76 } 77 78 @staticmethod 79 def get_process_timestamp(process: psutil.Process) -> str: 80 """Obtain the creation time of a process as a ISO 8601 timestamp. 81 82 Args: 83 process: The process to obtain the creation time from. 84 85 Returns: 86 A ISO 8601 timestamp formatted string. 87 """ 88 return f"{datetime.utcfromtimestamp(process.create_time()).isoformat()}Z" 89 90 @cached_property 91 def process_info(self) -> dict[str, Any]: 92 """Obtain the process information for the current process. 93 94 Returns: 95 A dictionary containing the process information. Such as the hashed process name, pid, core counts, etc 96 """ 97 process = psutil.Process() 98 with process.oneshot(): 99 return { 100 "num_cpu_cores": psutil.cpu_count(), 101 "num_cpu_cores_available": self.num_available_cores, 102 "process_hierarchy": [ 103 { 104 "process_name_hash": hash_sha256(proc.name()), 105 "process_creation_timestamp": self.get_process_timestamp(proc), 106 } 107 for proc in (process, *process.parents()) 108 ], 109 } 110 111 @cached_property 112 def num_available_cores(self) -> int: 113 """Obtain the number of available CPU cores. 114 115 Uses sched_getaffinity where available, otherwise falls back to cpu_count(). 116 117 Returns: 118 int: The number of available CPU cores. 119 """ 120 if safe_hasattr(os, "sched_getaffinity"): 121 return len(os.sched_getaffinity(0)) 122 return os.cpu_count() 123 124 125 environment_context = EnvironmentContext() 126 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/meltano/core/tracking/contexts/environment.py b/src/meltano/core/tracking/contexts/environment.py --- a/src/meltano/core/tracking/contexts/environment.py +++ b/src/meltano/core/tracking/contexts/environment.py @@ -56,11 +56,10 @@ Returns: A dictionary containing system information. """ - freedesktop_data = ( - platform.freedesktop_os_release() - if safe_hasattr(platform, "freedesktop_os_release") - else defaultdict(type(None)) - ) + try: + freedesktop_data = platform.freedesktop_os_release() + except Exception: + freedesktop_data = defaultdict(type(None)) return { "system_name": platform.system() or None,
{"golden_diff": "diff --git a/src/meltano/core/tracking/contexts/environment.py b/src/meltano/core/tracking/contexts/environment.py\n--- a/src/meltano/core/tracking/contexts/environment.py\n+++ b/src/meltano/core/tracking/contexts/environment.py\n@@ -56,11 +56,10 @@\n Returns:\n A dictionary containing system information.\n \"\"\"\n- freedesktop_data = (\n- platform.freedesktop_os_release()\n- if safe_hasattr(platform, \"freedesktop_os_release\")\n- else defaultdict(type(None))\n- )\n+ try:\n+ freedesktop_data = platform.freedesktop_os_release()\n+ except Exception:\n+ freedesktop_data = defaultdict(type(None))\n \n return {\n \"system_name\": platform.system() or None,\n", "issue": "`meltano --help` fails on Windows in some environments\nRunning any Meltano command in my fresh Windows VM results in the following error:\r\n\r\n![image](https://user-images.githubusercontent.com/11428666/176504379-a9489721-09b5-4a2d-ad83-c2f5a37adfb0.png)\r\n\r\nSorry for the image of text - I forgot to enable the shared clipboard for the VM.\r\n\r\nThe failure is happening here:\r\n\r\n```python\r\n freedesktop_data = (\r\n platform.freedesktop_os_release()\r\n if safe_hasattr(platform, \"freedesktop_os_release\")\r\n else defaultdict(type(None))\r\n )\r\n```\r\n\r\nThis might be because I'm running under git bash instead of Powershell.\r\n\r\nWe had assumed that if `freedesktop_os_release` is present, it will work. This is apparently only sometimes true. To prevent problems for Windows users we should put the call to `platform.freedesktop_os_release` in a try-except block. Doing so will also mean we no longer need to call `hasattr`.\n", "before_files": [{"content": "\"\"\"Environment context for the Snowplow tracker.\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nimport platform\nimport uuid\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import Any\n\nimport psutil\nfrom cached_property import cached_property\nfrom snowplow_tracker import SelfDescribingJson\nfrom structlog.stdlib import get_logger\n\nimport meltano\nfrom meltano.core.tracking.schemas import EnvironmentContextSchema\nfrom meltano.core.utils import hash_sha256, safe_hasattr\n\nlogger = get_logger(__name__)\n\n# This file is only ever created in CI when building a release\nrelease_marker_path = Path(__file__).parent / \".release_marker\"\n\n\nclass EnvironmentContext(SelfDescribingJson):\n \"\"\"Environment context for the Snowplow tracker.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the environment context.\"\"\"\n ci_markers = (\"GITHUB_ACTIONS\", \"CI\")\n super().__init__(\n EnvironmentContextSchema.url,\n {\n \"context_uuid\": str(uuid.uuid4()),\n \"meltano_version\": meltano.__version__,\n \"is_dev_build\": not release_marker_path.exists(),\n \"is_ci_environment\": any(\n # True if 'true', 'TRUE', 'True', or '1'\n os.environ.get(marker, \"\").lower()[:1] in {\"1\", \"t\"}\n for marker in ci_markers\n ),\n \"python_version\": platform.python_version(),\n \"python_implementation\": platform.python_implementation(),\n **self.system_info,\n **self.process_info,\n },\n )\n\n @cached_property\n def system_info(self) -> dict[str, Any]:\n \"\"\"Get system information.\n\n Returns:\n A dictionary containing system information.\n \"\"\"\n freedesktop_data = (\n platform.freedesktop_os_release()\n if safe_hasattr(platform, \"freedesktop_os_release\")\n else defaultdict(type(None))\n )\n\n return {\n \"system_name\": platform.system() or None,\n \"system_release\": platform.release() or None,\n \"system_version\": platform.version() or None,\n \"machine\": platform.machine() or None,\n \"windows_edition\": platform.win32_edition()\n if safe_hasattr(platform, \"win32_edition\")\n else None,\n \"freedesktop_id\": freedesktop_data[\"ID\"],\n \"freedesktop_id_like\": freedesktop_data.get(\"ID_LIKE\", None),\n \"freedesktop_version_id\": freedesktop_data.get(\"VERSION_ID\", None),\n }\n\n @staticmethod\n def get_process_timestamp(process: psutil.Process) -> str:\n \"\"\"Obtain the creation time of a process as a ISO 8601 timestamp.\n\n Args:\n process: The process to obtain the creation time from.\n\n Returns:\n A ISO 8601 timestamp formatted string.\n \"\"\"\n return f\"{datetime.utcfromtimestamp(process.create_time()).isoformat()}Z\"\n\n @cached_property\n def process_info(self) -> dict[str, Any]:\n \"\"\"Obtain the process information for the current process.\n\n Returns:\n A dictionary containing the process information. Such as the hashed process name, pid, core counts, etc\n \"\"\"\n process = psutil.Process()\n with process.oneshot():\n return {\n \"num_cpu_cores\": psutil.cpu_count(),\n \"num_cpu_cores_available\": self.num_available_cores,\n \"process_hierarchy\": [\n {\n \"process_name_hash\": hash_sha256(proc.name()),\n \"process_creation_timestamp\": self.get_process_timestamp(proc),\n }\n for proc in (process, *process.parents())\n ],\n }\n\n @cached_property\n def num_available_cores(self) -> int:\n \"\"\"Obtain the number of available CPU cores.\n\n Uses sched_getaffinity where available, otherwise falls back to cpu_count().\n\n Returns:\n int: The number of available CPU cores.\n \"\"\"\n if safe_hasattr(os, \"sched_getaffinity\"):\n return len(os.sched_getaffinity(0))\n return os.cpu_count()\n\n\nenvironment_context = EnvironmentContext()\n", "path": "src/meltano/core/tracking/contexts/environment.py"}], "after_files": [{"content": "\"\"\"Environment context for the Snowplow tracker.\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nimport platform\nimport uuid\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import Any\n\nimport psutil\nfrom cached_property import cached_property\nfrom snowplow_tracker import SelfDescribingJson\nfrom structlog.stdlib import get_logger\n\nimport meltano\nfrom meltano.core.tracking.schemas import EnvironmentContextSchema\nfrom meltano.core.utils import hash_sha256, safe_hasattr\n\nlogger = get_logger(__name__)\n\n# This file is only ever created in CI when building a release\nrelease_marker_path = Path(__file__).parent / \".release_marker\"\n\n\nclass EnvironmentContext(SelfDescribingJson):\n \"\"\"Environment context for the Snowplow tracker.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the environment context.\"\"\"\n ci_markers = (\"GITHUB_ACTIONS\", \"CI\")\n super().__init__(\n EnvironmentContextSchema.url,\n {\n \"context_uuid\": str(uuid.uuid4()),\n \"meltano_version\": meltano.__version__,\n \"is_dev_build\": not release_marker_path.exists(),\n \"is_ci_environment\": any(\n # True if 'true', 'TRUE', 'True', or '1'\n os.environ.get(marker, \"\").lower()[:1] in {\"1\", \"t\"}\n for marker in ci_markers\n ),\n \"python_version\": platform.python_version(),\n \"python_implementation\": platform.python_implementation(),\n **self.system_info,\n **self.process_info,\n },\n )\n\n @cached_property\n def system_info(self) -> dict[str, Any]:\n \"\"\"Get system information.\n\n Returns:\n A dictionary containing system information.\n \"\"\"\n try:\n freedesktop_data = platform.freedesktop_os_release()\n except Exception:\n freedesktop_data = defaultdict(type(None))\n\n return {\n \"system_name\": platform.system() or None,\n \"system_release\": platform.release() or None,\n \"system_version\": platform.version() or None,\n \"machine\": platform.machine() or None,\n \"windows_edition\": platform.win32_edition()\n if safe_hasattr(platform, \"win32_edition\")\n else None,\n \"freedesktop_id\": freedesktop_data[\"ID\"],\n \"freedesktop_id_like\": freedesktop_data.get(\"ID_LIKE\", None),\n \"freedesktop_version_id\": freedesktop_data.get(\"VERSION_ID\", None),\n }\n\n @staticmethod\n def get_process_timestamp(process: psutil.Process) -> str:\n \"\"\"Obtain the creation time of a process as a ISO 8601 timestamp.\n\n Args:\n process: The process to obtain the creation time from.\n\n Returns:\n A ISO 8601 timestamp formatted string.\n \"\"\"\n return f\"{datetime.utcfromtimestamp(process.create_time()).isoformat()}Z\"\n\n @cached_property\n def process_info(self) -> dict[str, Any]:\n \"\"\"Obtain the process information for the current process.\n\n Returns:\n A dictionary containing the process information. Such as the hashed process name, pid, core counts, etc\n \"\"\"\n process = psutil.Process()\n with process.oneshot():\n return {\n \"num_cpu_cores\": psutil.cpu_count(),\n \"num_cpu_cores_available\": self.num_available_cores,\n \"process_hierarchy\": [\n {\n \"process_name_hash\": hash_sha256(proc.name()),\n \"process_creation_timestamp\": self.get_process_timestamp(proc),\n }\n for proc in (process, *process.parents())\n ],\n }\n\n @cached_property\n def num_available_cores(self) -> int:\n \"\"\"Obtain the number of available CPU cores.\n\n Uses sched_getaffinity where available, otherwise falls back to cpu_count().\n\n Returns:\n int: The number of available CPU cores.\n \"\"\"\n if safe_hasattr(os, \"sched_getaffinity\"):\n return len(os.sched_getaffinity(0))\n return os.cpu_count()\n\n\nenvironment_context = EnvironmentContext()\n", "path": "src/meltano/core/tracking/contexts/environment.py"}]}
1,650
170
gh_patches_debug_255
rasdani/github-patches
git_diff
rasterio__rasterio-892
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Decimated read result differs depending on GDAL version ``` $ rio insp tests/data/alpha_masked_values.tif Rasterio 1.0a1 Interactive Inspector (Python 3.5.1) Type "src.meta", "src.read(1)", or "help(src)" for more information. >>> src.read(4, out_shape=(19, 19), masked=False)[-2:, 0:5] array([[ 0, 0, 0, 255, 0], [255, 255, 255, 255, 0]], dtype=uint8) >>> rasterio.__version__ '1.0a1' >>> rasterio.__gdal_version__ '1.11.5' ``` versus ``` $ rio insp tests/data/alpha_masked_values.tif Rasterio 1.0a1 Interactive Inspector (Python 3.5.1) Type "src.meta", "src.read(1)", or "help(src)" for more information. >>> src.read(4, out_shape=(19, 19), masked=False)[-2:, 0:5] array([[ 0, 0, 32, 64, 0], [255, 255, 255, 255, 0]], dtype=uint8) >>> rasterio.__version__ '1.0a1' >>> rasterio.__gdal_version__ '2.1.1' ``` I'll start a new branch with a similar test so we can put it through the travis build matrix. cc @dnomadb @sgillies --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `rasterio/env.py` Content: ``` 1 """Rasterio's GDAL/AWS environment""" 2 3 import logging 4 5 from rasterio._drivers import ( 6 GDALEnv, del_gdal_config, get_gdal_config, set_gdal_config) 7 from rasterio.dtypes import check_dtype 8 from rasterio.errors import EnvError 9 from rasterio.compat import string_types 10 from rasterio.transform import guard_transform 11 from rasterio.vfs import parse_path, vsi_path 12 13 14 # The currently active GDAL/AWS environment is a private attribute. 15 _env = None 16 17 log = logging.getLogger(__name__) 18 19 # Rasterio defaults 20 default_options = { 21 'CHECK_WITH_INVERT_PROJ': True 22 } 23 24 class Env(object): 25 """Abstraction for GDAL and AWS configuration 26 27 The GDAL library is stateful: it has a registry of format drivers, 28 an error stack, and dozens of configuration options. 29 30 Rasterio's approach to working with GDAL is to wrap all the state 31 up using a Python context manager (see PEP 343, 32 https://www.python.org/dev/peps/pep-0343/). When the context is 33 entered GDAL drivers are registered, error handlers are 34 configured, and configuration options are set. When the context 35 is exited, drivers are removed from the registry and other 36 configurations are removed. 37 38 Example: 39 40 with rasterio.Env(GDAL_CACHEMAX=512) as env: 41 # All drivers are registered, GDAL's raster block cache 42 # size is set to 512MB. 43 # Commence processing... 44 ... 45 # End of processing. 46 47 # At this point, configuration options are set to their 48 # previous (possible unset) values. 49 50 A boto3 session or boto3 session constructor arguments 51 `aws_access_key_id`, `aws_secret_access_key`, `aws_session_token` 52 may be passed to Env's constructor. In the latter case, a session 53 will be created as soon as needed. AWS credentials are configured 54 for GDAL as needed. 55 """ 56 57 def __init__(self, aws_session=None, aws_access_key_id=None, 58 aws_secret_access_key=None, aws_session_token=None, 59 region_name=None, profile_name=None, **options): 60 """Create a new GDAL/AWS environment. 61 62 Note: this class is a context manager. GDAL isn't configured 63 until the context is entered via `with rasterio.Env():` 64 65 Parameters 66 ---------- 67 aws_session: object, optional 68 A boto3 session. 69 aws_access_key_id: string, optional 70 An access key id, as per boto3. 71 aws_secret_access_key: string, optional 72 A secret access key, as per boto3. 73 aws_session_token: string, optional 74 A session token, as per boto3. 75 region_name: string, optional 76 A region name, as per boto3. 77 profile_name: string, optional 78 A shared credentials profile name, as per boto3. 79 **options: optional 80 A mapping of GDAL configuration options, e.g., 81 `CPL_DEBUG=True, CHECK_WITH_INVERT_PROJ=False`. 82 83 Returns 84 ------- 85 A new instance of Env. 86 87 Note: We raise EnvError if the GDAL config options 88 AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY are given. AWS 89 credentials are handled exclusively by boto3. 90 """ 91 if ('AWS_ACCESS_KEY_ID' in options or 92 'AWS_SECRET_ACCESS_KEY' in options): 93 raise EnvError( 94 "GDAL's AWS config options can not be directly set. " 95 "AWS credentials are handled exclusively by boto3.") 96 self.aws_access_key_id = aws_access_key_id 97 self.aws_secret_access_key = aws_secret_access_key 98 self.aws_session_token = aws_session_token 99 self.region_name = region_name 100 self.profile_name = profile_name 101 self.aws_session = aws_session 102 self._creds = ( 103 self.aws_session._session.get_credentials() 104 if self.aws_session else None) 105 self.options = options.copy() 106 self.previous_options = {} 107 defenv() 108 109 def get_aws_credentials(self): 110 """Get credentials and configure GDAL.""" 111 import boto3 112 options = {} 113 if not self.aws_session: 114 self.aws_session = boto3.Session( 115 aws_access_key_id=self.aws_access_key_id, 116 aws_secret_access_key=self.aws_secret_access_key, 117 aws_session_token=self.aws_session_token, 118 region_name=self.region_name, 119 profile_name=self.profile_name) 120 self._creds = self.aws_session._session.get_credentials() 121 122 # Pass these credentials to the GDAL environment. 123 if self._creds.access_key: # pragma: no branch 124 options.update(aws_access_key_id=self._creds.access_key) 125 if self._creds.secret_key: # pragma: no branch 126 options.update(aws_secret_access_key=self._creds.secret_key) 127 if self._creds.token: 128 options.update(aws_session_token=self._creds.token) 129 if self.aws_session.region_name: 130 options.update(aws_region=self.aws_session.region_name) 131 132 # Pass these credentials to the GDAL environment. 133 defenv() 134 global _env 135 _env.update_config_options(**options) 136 137 def drivers(self): 138 """Return a mapping of registered drivers.""" 139 global _env 140 return _env.drivers() 141 142 def __enter__(self): 143 self.previous_options = getenv() 144 setenv(**self.options) 145 log.debug("Entering env %r context", self) 146 return self 147 148 def __exit__(self, exc_type=None, exc_val=None, exc_tb=None): 149 delenv() 150 setenv(**self.previous_options) 151 log.debug("Exiting env %r context", self) 152 153 154 def defenv(): 155 """Create a default environment if necessary.""" 156 global _env 157 if _env: 158 log.debug("Environment %r exists", _env) 159 else: 160 _env = GDALEnv() 161 _env.update_config_options(**default_options) 162 log.debug( 163 "New GDAL environment %r created", _env) 164 165 166 def getenv(): 167 """Get a mapping of current options.""" 168 global _env 169 if not _env: 170 raise EnvError("No environment exists") 171 else: 172 log.debug("Got a copy of environment %r options", _env) 173 return _env.options.copy() 174 175 176 def setenv(**options): 177 """Set options in the existing environment.""" 178 global _env 179 if not _env: 180 raise EnvError("No environment exists") 181 else: 182 _env.update_config_options(**options) 183 log.debug("Updated existing %r with options %r", _env, options) 184 185 186 def delenv(): 187 """Delete options in the existing environment.""" 188 global _env 189 if not _env: 190 raise EnvError("No environment exists") 191 else: 192 _env.clear_config_options() 193 log.debug("Cleared existing %r options", _env) 194 195 196 def ensure_env(f): 197 """A decorator that ensures an env exists before a function 198 calls any GDAL C functions.""" 199 defenv() 200 return f 201 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/rasterio/env.py b/rasterio/env.py --- a/rasterio/env.py +++ b/rasterio/env.py @@ -18,7 +18,8 @@ # Rasterio defaults default_options = { - 'CHECK_WITH_INVERT_PROJ': True + 'CHECK_WITH_INVERT_PROJ': True, + 'GTIFF_IMPLICIT_JPEG_OVR': False } class Env(object):
{"golden_diff": "diff --git a/rasterio/env.py b/rasterio/env.py\n--- a/rasterio/env.py\n+++ b/rasterio/env.py\n@@ -18,7 +18,8 @@\n \n # Rasterio defaults\n default_options = {\n- 'CHECK_WITH_INVERT_PROJ': True\n+ 'CHECK_WITH_INVERT_PROJ': True,\n+ 'GTIFF_IMPLICIT_JPEG_OVR': False\n }\n \n class Env(object):\n", "issue": "Decimated read result differs depending on GDAL version\n```\n$ rio insp tests/data/alpha_masked_values.tif\nRasterio 1.0a1 Interactive Inspector (Python 3.5.1)\nType \"src.meta\", \"src.read(1)\", or \"help(src)\" for more information.\n>>> src.read(4, out_shape=(19, 19), masked=False)[-2:, 0:5]\narray([[ 0, 0, 0, 255, 0],\n [255, 255, 255, 255, 0]], dtype=uint8)\n>>> rasterio.__version__\n'1.0a1'\n>>> rasterio.__gdal_version__\n'1.11.5'\n```\n\nversus \n\n```\n$ rio insp tests/data/alpha_masked_values.tif\nRasterio 1.0a1 Interactive Inspector (Python 3.5.1)\nType \"src.meta\", \"src.read(1)\", or \"help(src)\" for more information.\n>>> src.read(4, out_shape=(19, 19), masked=False)[-2:, 0:5]\narray([[ 0, 0, 32, 64, 0],\n [255, 255, 255, 255, 0]], dtype=uint8)\n>>> rasterio.__version__\n'1.0a1'\n>>> rasterio.__gdal_version__\n'2.1.1'\n```\n\nI'll start a new branch with a similar test so we can put it through the travis build matrix.\n\ncc @dnomadb @sgillies \n\n", "before_files": [{"content": "\"\"\"Rasterio's GDAL/AWS environment\"\"\"\n\nimport logging\n\nfrom rasterio._drivers import (\n GDALEnv, del_gdal_config, get_gdal_config, set_gdal_config)\nfrom rasterio.dtypes import check_dtype\nfrom rasterio.errors import EnvError\nfrom rasterio.compat import string_types\nfrom rasterio.transform import guard_transform\nfrom rasterio.vfs import parse_path, vsi_path\n\n\n# The currently active GDAL/AWS environment is a private attribute.\n_env = None\n\nlog = logging.getLogger(__name__)\n\n# Rasterio defaults\ndefault_options = {\n 'CHECK_WITH_INVERT_PROJ': True\n}\n\nclass Env(object):\n \"\"\"Abstraction for GDAL and AWS configuration\n\n The GDAL library is stateful: it has a registry of format drivers,\n an error stack, and dozens of configuration options.\n\n Rasterio's approach to working with GDAL is to wrap all the state\n up using a Python context manager (see PEP 343,\n https://www.python.org/dev/peps/pep-0343/). When the context is\n entered GDAL drivers are registered, error handlers are\n configured, and configuration options are set. When the context\n is exited, drivers are removed from the registry and other\n configurations are removed.\n\n Example:\n\n with rasterio.Env(GDAL_CACHEMAX=512) as env:\n # All drivers are registered, GDAL's raster block cache\n # size is set to 512MB.\n # Commence processing...\n ...\n # End of processing.\n\n # At this point, configuration options are set to their\n # previous (possible unset) values.\n\n A boto3 session or boto3 session constructor arguments\n `aws_access_key_id`, `aws_secret_access_key`, `aws_session_token`\n may be passed to Env's constructor. In the latter case, a session\n will be created as soon as needed. AWS credentials are configured\n for GDAL as needed.\n \"\"\"\n\n def __init__(self, aws_session=None, aws_access_key_id=None,\n aws_secret_access_key=None, aws_session_token=None,\n region_name=None, profile_name=None, **options):\n \"\"\"Create a new GDAL/AWS environment.\n\n Note: this class is a context manager. GDAL isn't configured\n until the context is entered via `with rasterio.Env():`\n\n Parameters\n ----------\n aws_session: object, optional\n A boto3 session.\n aws_access_key_id: string, optional\n An access key id, as per boto3.\n aws_secret_access_key: string, optional\n A secret access key, as per boto3.\n aws_session_token: string, optional\n A session token, as per boto3.\n region_name: string, optional\n A region name, as per boto3.\n profile_name: string, optional\n A shared credentials profile name, as per boto3.\n **options: optional\n A mapping of GDAL configuration options, e.g.,\n `CPL_DEBUG=True, CHECK_WITH_INVERT_PROJ=False`.\n\n Returns\n -------\n A new instance of Env.\n\n Note: We raise EnvError if the GDAL config options\n AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY are given. AWS\n credentials are handled exclusively by boto3.\n \"\"\"\n if ('AWS_ACCESS_KEY_ID' in options or\n 'AWS_SECRET_ACCESS_KEY' in options):\n raise EnvError(\n \"GDAL's AWS config options can not be directly set. \"\n \"AWS credentials are handled exclusively by boto3.\")\n self.aws_access_key_id = aws_access_key_id\n self.aws_secret_access_key = aws_secret_access_key\n self.aws_session_token = aws_session_token\n self.region_name = region_name\n self.profile_name = profile_name\n self.aws_session = aws_session\n self._creds = (\n self.aws_session._session.get_credentials()\n if self.aws_session else None)\n self.options = options.copy()\n self.previous_options = {}\n defenv()\n\n def get_aws_credentials(self):\n \"\"\"Get credentials and configure GDAL.\"\"\"\n import boto3\n options = {}\n if not self.aws_session:\n self.aws_session = boto3.Session(\n aws_access_key_id=self.aws_access_key_id,\n aws_secret_access_key=self.aws_secret_access_key,\n aws_session_token=self.aws_session_token,\n region_name=self.region_name,\n profile_name=self.profile_name)\n self._creds = self.aws_session._session.get_credentials()\n\n # Pass these credentials to the GDAL environment.\n if self._creds.access_key: # pragma: no branch\n options.update(aws_access_key_id=self._creds.access_key)\n if self._creds.secret_key: # pragma: no branch\n options.update(aws_secret_access_key=self._creds.secret_key)\n if self._creds.token:\n options.update(aws_session_token=self._creds.token)\n if self.aws_session.region_name:\n options.update(aws_region=self.aws_session.region_name)\n\n # Pass these credentials to the GDAL environment.\n defenv()\n global _env\n _env.update_config_options(**options)\n\n def drivers(self):\n \"\"\"Return a mapping of registered drivers.\"\"\"\n global _env\n return _env.drivers()\n\n def __enter__(self):\n self.previous_options = getenv()\n setenv(**self.options)\n log.debug(\"Entering env %r context\", self)\n return self\n\n def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):\n delenv()\n setenv(**self.previous_options)\n log.debug(\"Exiting env %r context\", self)\n\n\ndef defenv():\n \"\"\"Create a default environment if necessary.\"\"\"\n global _env\n if _env:\n log.debug(\"Environment %r exists\", _env)\n else:\n _env = GDALEnv()\n _env.update_config_options(**default_options)\n log.debug(\n \"New GDAL environment %r created\", _env)\n\n\ndef getenv():\n \"\"\"Get a mapping of current options.\"\"\"\n global _env\n if not _env:\n raise EnvError(\"No environment exists\")\n else:\n log.debug(\"Got a copy of environment %r options\", _env)\n return _env.options.copy()\n\n\ndef setenv(**options):\n \"\"\"Set options in the existing environment.\"\"\"\n global _env\n if not _env:\n raise EnvError(\"No environment exists\")\n else:\n _env.update_config_options(**options)\n log.debug(\"Updated existing %r with options %r\", _env, options)\n\n\ndef delenv():\n \"\"\"Delete options in the existing environment.\"\"\"\n global _env\n if not _env:\n raise EnvError(\"No environment exists\")\n else:\n _env.clear_config_options()\n log.debug(\"Cleared existing %r options\", _env)\n\n\ndef ensure_env(f):\n \"\"\"A decorator that ensures an env exists before a function\n calls any GDAL C functions.\"\"\"\n defenv()\n return f\n", "path": "rasterio/env.py"}], "after_files": [{"content": "\"\"\"Rasterio's GDAL/AWS environment\"\"\"\n\nimport logging\n\nfrom rasterio._drivers import (\n GDALEnv, del_gdal_config, get_gdal_config, set_gdal_config)\nfrom rasterio.dtypes import check_dtype\nfrom rasterio.errors import EnvError\nfrom rasterio.compat import string_types\nfrom rasterio.transform import guard_transform\nfrom rasterio.vfs import parse_path, vsi_path\n\n\n# The currently active GDAL/AWS environment is a private attribute.\n_env = None\n\nlog = logging.getLogger(__name__)\n\n# Rasterio defaults\ndefault_options = {\n 'CHECK_WITH_INVERT_PROJ': True,\n 'GTIFF_IMPLICIT_JPEG_OVR': False\n}\n\nclass Env(object):\n \"\"\"Abstraction for GDAL and AWS configuration\n\n The GDAL library is stateful: it has a registry of format drivers,\n an error stack, and dozens of configuration options.\n\n Rasterio's approach to working with GDAL is to wrap all the state\n up using a Python context manager (see PEP 343,\n https://www.python.org/dev/peps/pep-0343/). When the context is\n entered GDAL drivers are registered, error handlers are\n configured, and configuration options are set. When the context\n is exited, drivers are removed from the registry and other\n configurations are removed.\n\n Example:\n\n with rasterio.Env(GDAL_CACHEMAX=512) as env:\n # All drivers are registered, GDAL's raster block cache\n # size is set to 512MB.\n # Commence processing...\n ...\n # End of processing.\n\n # At this point, configuration options are set to their\n # previous (possible unset) values.\n\n A boto3 session or boto3 session constructor arguments\n `aws_access_key_id`, `aws_secret_access_key`, `aws_session_token`\n may be passed to Env's constructor. In the latter case, a session\n will be created as soon as needed. AWS credentials are configured\n for GDAL as needed.\n \"\"\"\n\n def __init__(self, aws_session=None, aws_access_key_id=None,\n aws_secret_access_key=None, aws_session_token=None,\n region_name=None, profile_name=None, **options):\n \"\"\"Create a new GDAL/AWS environment.\n\n Note: this class is a context manager. GDAL isn't configured\n until the context is entered via `with rasterio.Env():`\n\n Parameters\n ----------\n aws_session: object, optional\n A boto3 session.\n aws_access_key_id: string, optional\n An access key id, as per boto3.\n aws_secret_access_key: string, optional\n A secret access key, as per boto3.\n aws_session_token: string, optional\n A session token, as per boto3.\n region_name: string, optional\n A region name, as per boto3.\n profile_name: string, optional\n A shared credentials profile name, as per boto3.\n **options: optional\n A mapping of GDAL configuration options, e.g.,\n `CPL_DEBUG=True, CHECK_WITH_INVERT_PROJ=False`.\n\n Returns\n -------\n A new instance of Env.\n\n Note: We raise EnvError if the GDAL config options\n AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY are given. AWS\n credentials are handled exclusively by boto3.\n \"\"\"\n if ('AWS_ACCESS_KEY_ID' in options or\n 'AWS_SECRET_ACCESS_KEY' in options):\n raise EnvError(\n \"GDAL's AWS config options can not be directly set. \"\n \"AWS credentials are handled exclusively by boto3.\")\n self.aws_access_key_id = aws_access_key_id\n self.aws_secret_access_key = aws_secret_access_key\n self.aws_session_token = aws_session_token\n self.region_name = region_name\n self.profile_name = profile_name\n self.aws_session = aws_session\n self._creds = (\n self.aws_session._session.get_credentials()\n if self.aws_session else None)\n self.options = options.copy()\n self.previous_options = {}\n defenv()\n\n def get_aws_credentials(self):\n \"\"\"Get credentials and configure GDAL.\"\"\"\n import boto3\n options = {}\n if not self.aws_session:\n self.aws_session = boto3.Session(\n aws_access_key_id=self.aws_access_key_id,\n aws_secret_access_key=self.aws_secret_access_key,\n aws_session_token=self.aws_session_token,\n region_name=self.region_name,\n profile_name=self.profile_name)\n self._creds = self.aws_session._session.get_credentials()\n\n # Pass these credentials to the GDAL environment.\n if self._creds.access_key: # pragma: no branch\n options.update(aws_access_key_id=self._creds.access_key)\n if self._creds.secret_key: # pragma: no branch\n options.update(aws_secret_access_key=self._creds.secret_key)\n if self._creds.token:\n options.update(aws_session_token=self._creds.token)\n if self.aws_session.region_name:\n options.update(aws_region=self.aws_session.region_name)\n\n # Pass these credentials to the GDAL environment.\n defenv()\n global _env\n _env.update_config_options(**options)\n\n def drivers(self):\n \"\"\"Return a mapping of registered drivers.\"\"\"\n global _env\n return _env.drivers()\n\n def __enter__(self):\n self.previous_options = getenv()\n setenv(**self.options)\n log.debug(\"Entering env %r context\", self)\n return self\n\n def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):\n delenv()\n setenv(**self.previous_options)\n log.debug(\"Exiting env %r context\", self)\n\n\ndef defenv():\n \"\"\"Create a default environment if necessary.\"\"\"\n global _env\n if _env:\n log.debug(\"Environment %r exists\", _env)\n else:\n _env = GDALEnv()\n _env.update_config_options(**default_options)\n log.debug(\n \"New GDAL environment %r created\", _env)\n\n\ndef getenv():\n \"\"\"Get a mapping of current options.\"\"\"\n global _env\n if not _env:\n raise EnvError(\"No environment exists\")\n else:\n log.debug(\"Got a copy of environment %r options\", _env)\n return _env.options.copy()\n\n\ndef setenv(**options):\n \"\"\"Set options in the existing environment.\"\"\"\n global _env\n if not _env:\n raise EnvError(\"No environment exists\")\n else:\n _env.update_config_options(**options)\n log.debug(\"Updated existing %r with options %r\", _env, options)\n\n\ndef delenv():\n \"\"\"Delete options in the existing environment.\"\"\"\n global _env\n if not _env:\n raise EnvError(\"No environment exists\")\n else:\n _env.clear_config_options()\n log.debug(\"Cleared existing %r options\", _env)\n\n\ndef ensure_env(f):\n \"\"\"A decorator that ensures an env exists before a function\n calls any GDAL C functions.\"\"\"\n defenv()\n return f\n", "path": "rasterio/env.py"}]}
2,661
99
gh_patches_debug_35943
rasdani/github-patches
git_diff
pyjanitor-devs__pyjanitor-13
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Remove Trailing (and Leading) underscores from clean_names Pandas handles this relatively well but would be a good as a kwarg for `jn.clean_names()`. The default `False` or `None` could leave leading, `True` or `'both'` to remove all and trailing and then pass `'leading'` and `'trailing`' or similar to remove each. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `janitor/functions.py` Content: ``` 1 import datetime as dt 2 from functools import reduce 3 4 import pandas as pd 5 6 from .errors import JanitorError 7 8 import re 9 10 11 def clean_names(df): 12 """ 13 Clean column names. 14 15 Takes all column names, converts them to lowercase, then replaces all 16 spaces with underscores. 17 18 Functional usage example: 19 20 .. code-block:: python 21 22 df = clean_names(df) 23 24 Method chaining example: 25 26 .. code-block:: python 27 28 df = pd.DataFrame(...) 29 df = jn.DataFrame(df).clean_names() 30 31 :param df: The pandas DataFrame object. 32 :returns: A pandas DataFrame. 33 """ 34 df = df.rename( 35 columns=lambda x: x.lower() 36 .replace(' ', '_') 37 .replace('/', '_') 38 .replace(':', '_') 39 .replace("'", '') 40 .replace('’', '') 41 .replace(',', '_') 42 .replace('?', '_') 43 .replace('-', '_') 44 .replace('(', '_') 45 .replace(')', '_') 46 .replace('.', '_') 47 ) 48 49 df = df.rename(columns=lambda x: re.sub('_+', '_', x)) 50 return df 51 52 53 def remove_empty(df): 54 """ 55 Drop all rows and columns that are completely null. 56 57 Implementation is shamelessly copied from `StackOverflow`_. 58 59 .. _StackOverflow: https://stackoverflow.com/questions/38884538/python-pandas-find-all-rows-where-all-values-are-nan # noqa: E501 60 61 Functional usage example: 62 63 .. code-block:: python 64 65 df = remove_empty(df) 66 67 Method chaining example: 68 69 .. code-block:: python 70 71 df = pd.DataFrame(...) 72 df = jn.DataFrame(df).remove_empty() 73 74 :param df: The pandas DataFrame object. 75 :returns: A pandas DataFrame. 76 """ 77 78 nanrows = df.index[df.isnull().all(axis=1)] 79 df.drop(index=nanrows, inplace=True) 80 81 nancols = df.columns[df.isnull().all(axis=0)] 82 df.drop(columns=nancols, inplace=True) 83 84 return df 85 86 87 def get_dupes(df, columns=None): 88 """ 89 Returns all duplicate rows. 90 91 Functional usage example: 92 93 .. code-block:: python 94 95 get_dupes(df) 96 97 Method chaining example: 98 99 .. code-block:: python 100 101 df = pd.DataFrame(...) 102 jn.DataFrame(df).get_dupes() 103 104 :param df: The pandas DataFrame object. 105 :param str/iterable columns: (optional) A column name or an iterable (list 106 or tuple) of column names. Following pandas API, this only considers 107 certain columns for identifying duplicates. Defaults to using all 108 columns. 109 :returns: The duplicate rows, as a pandas DataFrame. 110 """ 111 dupes = df.duplicated(subset=columns, keep=False) 112 return df[dupes == True] # noqa: E712 113 114 115 def encode_categorical(df, columns): 116 """ 117 Encode the specified columns as categorical. 118 119 Functional usage example: 120 121 .. code-block:: python 122 123 encode_categorical(df, columns="my_categorical_column") # one way 124 125 Method chaining example: 126 127 .. code-block:: python 128 129 df = pd.DataFrame(...) 130 categorical_cols = ['col1', 'col2', 'col4'] 131 jn.DataFrame(df).encode_categorical(columns=categorical_cols) 132 133 :param df: The pandas DataFrame object. 134 :param str/iterable columns: A column name or an iterable (list or tuple) 135 of column names. 136 :returns: A pandas DataFrame 137 """ 138 if isinstance(columns, list) or isinstance(columns, tuple): 139 for col in columns: 140 assert col in df.columns, \ 141 JanitorError("{col} missing from dataframe columns!".format(col=col)) # noqa: E501 142 df[col] = pd.Categorical(df[col]) 143 elif isinstance(columns, str): 144 df[columns] = pd.Categorical(df[columns]) 145 else: 146 raise JanitorError('kwarg `columns` must be a string or iterable!') 147 return df 148 149 150 def get_features_targets(df, target_columns, feature_columns=None): 151 """ 152 Get the features and targets as separate DataFrames/Series. 153 154 The behaviour is as such: 155 156 - `target_columns` is mandatory. 157 - If `feature_columns` is present, then we will respect the column names 158 inside there. 159 - If `feature_columns` is not passed in, then we will assume that the 160 rest of the columns are feature columns, and return them. 161 162 Functional usage example: 163 164 .. code-block:: python 165 166 X, y = get_features_targets(df, target_columns="measurement") 167 168 Method chaining example: 169 170 .. code-block:: python 171 172 df = pd.DataFrame(...) 173 target_cols = ['output1', 'output2'] 174 X, y = jn.DataFrame(df).get_features_targets(target_columns=target_cols) # noqa: E501 175 176 :param df: The pandas DataFrame object. 177 :param str/iterable target_columns: Either a column name or an iterable 178 (list or tuple) of column names that are the target(s) to be predicted. 179 :param str/iterable feature_columns: (optional) The column name or iterable 180 of column names that are the features (a.k.a. predictors) used to 181 predict the targets. 182 :returns: (X, Y) the feature matrix (X) and the target matrix (Y). Both are 183 pandas DataFrames. 184 """ 185 Y = df[target_columns] 186 187 if feature_columns: 188 X = df[feature_columns] 189 else: 190 if isinstance(target_columns, str): 191 xcols = [c for c in df.columns if target_columns != c] 192 elif (isinstance(target_columns, list) 193 or isinstance(target_columns, tuple)): 194 xcols = [c for c in df.columns if c not in target_columns] 195 X = df[xcols] 196 return X, Y 197 198 199 def rename_column(df, old, new): 200 """ 201 Rename a column in place. 202 203 Functional usage example: 204 205 .. code-block:: python 206 207 df = rename_column("old_column_name", "new_column_name") 208 209 Method chaining example: 210 211 .. code-block:: python 212 213 df = pd.DataFrame(...) 214 df = jn.DataFrame(df).rename_column("old_column_name", "new_column_name") # noqa: E501 215 216 This is just syntactic sugar/a convenience function for renaming one column 217 at a time. If you are convinced that there are multiple columns in need of 218 changing, then use the :py:meth:`pandas.DataFrame.rename` method. 219 220 :param str old: The old column name. 221 :param str new: The new column name. 222 :returns: A pandas DataFrame. 223 """ 224 return df.rename(columns={old: new}) 225 226 227 def coalesce(df, columns, new_column_name): 228 """ 229 Coalesces two or more columns of data in order of column names provided. 230 231 Functional usage example: 232 233 .. code-block:: python 234 235 df = coalesce(df, columns=['col1', 'col2']) 236 237 Method chaining example: 238 239 .. code-block:: python 240 241 df = pd.DataFrame(...) 242 df = jn.DataFrame(df).coalesce(['col1', 'col2']) 243 244 245 The result of this function is that we take the first non-null value across 246 rows. 247 248 This is more syntactic diabetes! For R users, this should look familiar to 249 `dplyr`'s `coalesce` function; for Python users, the interface 250 should be more intuitive than the :py:meth:`pandas.Series.combine_first` 251 method (which we're just using internally anyways). 252 253 :param df: A pandas DataFrame. 254 :param columns: A list of column names. 255 :param str new_column_name: The new column name after combining. 256 :returns: A pandas DataFrame. 257 """ 258 series = [df[c] for c in columns] 259 260 def _coalesce(series1, series2): 261 return series1.combine_first(series2) 262 df = df.drop(columns=columns) 263 df[new_column_name] = reduce(_coalesce, series) # noqa: F821 264 return df 265 266 267 def convert_excel_date(df, column): 268 """ 269 Convert Excel's serial date format into Python datetime format. 270 271 Implementation is also from `Stack Overflow`. 272 273 .. _Stack Overflow: https://stackoverflow.com/questions/38454403/convert-excel-style-date-with-pandas # noqa: E501 274 275 Functional usage example: 276 277 .. code-block:: python 278 279 df = convert_excel_date(df, column='date') 280 281 Method chaining example: 282 283 .. code-block:: python 284 285 df = pd.DataFrame(...) 286 df = jn.DataFrame(df).convert_excel_date('date') 287 288 :param df: A pandas DataFrame. 289 :param str column: A column name. 290 :returns: A pandas DataFrame with corrected dates. 291 """ 292 df[column] = (pd.TimedeltaIndex(df[column], unit='d') 293 + dt.datetime(1899, 12, 30)) 294 return df 295 296 297 def fill_empty(df, columns, value): 298 """ 299 Fill `NaN` values in specified columns with a given value. 300 301 Super sugary syntax that wraps :py:meth:`pandas.DataFrame.fillna`. 302 303 Functional usage example: 304 305 .. code-block:: python 306 307 df = fill_empty(df, columns=['col1', 'col2'], value=0) 308 309 Method chaining example: 310 311 .. code-block:: python 312 313 df = pd.DataFrame(...) 314 df = jn.DataFrame(df).fill_empty(df, columns='col1', value=0) 315 316 :param df: A pandas DataFrame. 317 :param columns: Either a `str` or `list` or `tuple`. If a string is passed 318 in, then only that column will be filled; if a list or tuple of strings 319 are passed in, then they will all be filled with the same value. 320 :param value: The value that replaces the `NaN` values. 321 """ 322 if isinstance(columns, list) or isinstance(columns, tuple): 323 for col in columns: 324 assert col in df.columns, \ 325 JanitorError("{col} missing from dataframe columns!".format(col=col)) # noqa: E501 326 df[col] = df[col].fillna(value) 327 elif isinstance(columns, str): 328 df[columns] = df[columns].fillna(value) 329 else: 330 raise JanitorError('kwarg `columns` must be a string or iterable!') 331 332 return df 333 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/janitor/functions.py b/janitor/functions.py --- a/janitor/functions.py +++ b/janitor/functions.py @@ -8,7 +8,38 @@ import re -def clean_names(df): +def _strip_underscores(df, strip_underscores=None): + """ + Strip underscores from the beginning, end or both of the + of the DataFrames column names. + + .. code-block:: python + + df = _strip_underscores(df, strip_underscores='left') + + :param df: The pandas DataFrame object. + :param strip_underscores: (optional) Removes the outer underscores from all + column names. Default None keeps outer underscores. Values can be + either 'left', 'right' or 'both' or the respective shorthand 'l', 'r' + and True. + :returns: A pandas DataFrame. + """ + underscore_options = [None, 'left', 'right', 'both', 'l', 'r', True] + if strip_underscores not in underscore_options: + raise JanitorError( + """strip_underscores must be one of: %s""" % underscore_options + ) + + if strip_underscores in ['left', 'l']: + df = df.rename(columns=lambda x: x.lstrip('_')) + elif strip_underscores in ['right', 'r']: + df = df.rename(columns=lambda x: x.rstrip('_')) + elif strip_underscores == 'both' or strip_underscores is True: + df = df.rename(columns=lambda x: x.strip('_')) + return df + + +def clean_names(df, strip_underscores=None): """ Clean column names. @@ -29,6 +60,10 @@ df = jn.DataFrame(df).clean_names() :param df: The pandas DataFrame object. + :param strip_underscores: (optional) Removes the outer underscores from all + column names. Default None keeps outer underscores. Values can be + either 'left', 'right' or 'both' or the respective shorthand 'l', 'r' + and True. :returns: A pandas DataFrame. """ df = df.rename( @@ -47,6 +82,7 @@ ) df = df.rename(columns=lambda x: re.sub('_+', '_', x)) + df = _strip_underscores(df, strip_underscores) return df @@ -190,7 +226,7 @@ if isinstance(target_columns, str): xcols = [c for c in df.columns if target_columns != c] elif (isinstance(target_columns, list) - or isinstance(target_columns, tuple)): + or isinstance(target_columns, tuple)): xcols = [c for c in df.columns if c not in target_columns] X = df[xcols] return X, Y
{"golden_diff": "diff --git a/janitor/functions.py b/janitor/functions.py\n--- a/janitor/functions.py\n+++ b/janitor/functions.py\n@@ -8,7 +8,38 @@\n import re\n \n \n-def clean_names(df):\n+def _strip_underscores(df, strip_underscores=None):\n+ \"\"\"\n+ Strip underscores from the beginning, end or both of the\n+ of the DataFrames column names.\n+\n+ .. code-block:: python\n+\n+ df = _strip_underscores(df, strip_underscores='left')\n+\n+ :param df: The pandas DataFrame object.\n+ :param strip_underscores: (optional) Removes the outer underscores from all\n+ column names. Default None keeps outer underscores. Values can be\n+ either 'left', 'right' or 'both' or the respective shorthand 'l', 'r'\n+ and True.\n+ :returns: A pandas DataFrame.\n+ \"\"\"\n+ underscore_options = [None, 'left', 'right', 'both', 'l', 'r', True]\n+ if strip_underscores not in underscore_options:\n+ raise JanitorError(\n+ \"\"\"strip_underscores must be one of: %s\"\"\" % underscore_options\n+ )\n+\n+ if strip_underscores in ['left', 'l']:\n+ df = df.rename(columns=lambda x: x.lstrip('_'))\n+ elif strip_underscores in ['right', 'r']:\n+ df = df.rename(columns=lambda x: x.rstrip('_'))\n+ elif strip_underscores == 'both' or strip_underscores is True:\n+ df = df.rename(columns=lambda x: x.strip('_'))\n+ return df\n+\n+\n+def clean_names(df, strip_underscores=None):\n \"\"\"\n Clean column names.\n \n@@ -29,6 +60,10 @@\n df = jn.DataFrame(df).clean_names()\n \n :param df: The pandas DataFrame object.\n+ :param strip_underscores: (optional) Removes the outer underscores from all\n+ column names. Default None keeps outer underscores. Values can be\n+ either 'left', 'right' or 'both' or the respective shorthand 'l', 'r'\n+ and True.\n :returns: A pandas DataFrame.\n \"\"\"\n df = df.rename(\n@@ -47,6 +82,7 @@\n )\n \n df = df.rename(columns=lambda x: re.sub('_+', '_', x))\n+ df = _strip_underscores(df, strip_underscores)\n return df\n \n \n@@ -190,7 +226,7 @@\n if isinstance(target_columns, str):\n xcols = [c for c in df.columns if target_columns != c]\n elif (isinstance(target_columns, list)\n- or isinstance(target_columns, tuple)):\n+ or isinstance(target_columns, tuple)):\n xcols = [c for c in df.columns if c not in target_columns]\n X = df[xcols]\n return X, Y\n", "issue": "Remove Trailing (and Leading) underscores from clean_names\nPandas handles this relatively well but would be a good as a kwarg for `jn.clean_names()`.\r\n\r\nThe default `False` or `None` could leave leading, `True` or `'both'` to remove all and trailing and then pass `'leading'` and `'trailing`' or similar to remove each.\n", "before_files": [{"content": "import datetime as dt\nfrom functools import reduce\n\nimport pandas as pd\n\nfrom .errors import JanitorError\n\nimport re\n\n\ndef clean_names(df):\n \"\"\"\n Clean column names.\n\n Takes all column names, converts them to lowercase, then replaces all\n spaces with underscores.\n\n Functional usage example:\n\n .. code-block:: python\n\n df = clean_names(df)\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).clean_names()\n\n :param df: The pandas DataFrame object.\n :returns: A pandas DataFrame.\n \"\"\"\n df = df.rename(\n columns=lambda x: x.lower()\n .replace(' ', '_')\n .replace('/', '_')\n .replace(':', '_')\n .replace(\"'\", '')\n .replace('\u2019', '')\n .replace(',', '_')\n .replace('?', '_')\n .replace('-', '_')\n .replace('(', '_')\n .replace(')', '_')\n .replace('.', '_')\n )\n\n df = df.rename(columns=lambda x: re.sub('_+', '_', x))\n return df\n\n\ndef remove_empty(df):\n \"\"\"\n Drop all rows and columns that are completely null.\n\n Implementation is shamelessly copied from `StackOverflow`_.\n\n .. _StackOverflow: https://stackoverflow.com/questions/38884538/python-pandas-find-all-rows-where-all-values-are-nan # noqa: E501\n\n Functional usage example:\n\n .. code-block:: python\n\n df = remove_empty(df)\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).remove_empty()\n\n :param df: The pandas DataFrame object.\n :returns: A pandas DataFrame.\n \"\"\"\n\n nanrows = df.index[df.isnull().all(axis=1)]\n df.drop(index=nanrows, inplace=True)\n\n nancols = df.columns[df.isnull().all(axis=0)]\n df.drop(columns=nancols, inplace=True)\n\n return df\n\n\ndef get_dupes(df, columns=None):\n \"\"\"\n Returns all duplicate rows.\n\n Functional usage example:\n\n .. code-block:: python\n\n get_dupes(df)\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n jn.DataFrame(df).get_dupes()\n\n :param df: The pandas DataFrame object.\n :param str/iterable columns: (optional) A column name or an iterable (list\n or tuple) of column names. Following pandas API, this only considers\n certain columns for identifying duplicates. Defaults to using all\n columns.\n :returns: The duplicate rows, as a pandas DataFrame.\n \"\"\"\n dupes = df.duplicated(subset=columns, keep=False)\n return df[dupes == True] # noqa: E712\n\n\ndef encode_categorical(df, columns):\n \"\"\"\n Encode the specified columns as categorical.\n\n Functional usage example:\n\n .. code-block:: python\n\n encode_categorical(df, columns=\"my_categorical_column\") # one way\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n categorical_cols = ['col1', 'col2', 'col4']\n jn.DataFrame(df).encode_categorical(columns=categorical_cols)\n\n :param df: The pandas DataFrame object.\n :param str/iterable columns: A column name or an iterable (list or tuple)\n of column names.\n :returns: A pandas DataFrame\n \"\"\"\n if isinstance(columns, list) or isinstance(columns, tuple):\n for col in columns:\n assert col in df.columns, \\\n JanitorError(\"{col} missing from dataframe columns!\".format(col=col)) # noqa: E501\n df[col] = pd.Categorical(df[col])\n elif isinstance(columns, str):\n df[columns] = pd.Categorical(df[columns])\n else:\n raise JanitorError('kwarg `columns` must be a string or iterable!')\n return df\n\n\ndef get_features_targets(df, target_columns, feature_columns=None):\n \"\"\"\n Get the features and targets as separate DataFrames/Series.\n\n The behaviour is as such:\n\n - `target_columns` is mandatory.\n - If `feature_columns` is present, then we will respect the column names\n inside there.\n - If `feature_columns` is not passed in, then we will assume that the\n rest of the columns are feature columns, and return them.\n\n Functional usage example:\n\n .. code-block:: python\n\n X, y = get_features_targets(df, target_columns=\"measurement\")\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n target_cols = ['output1', 'output2']\n X, y = jn.DataFrame(df).get_features_targets(target_columns=target_cols) # noqa: E501\n\n :param df: The pandas DataFrame object.\n :param str/iterable target_columns: Either a column name or an iterable\n (list or tuple) of column names that are the target(s) to be predicted.\n :param str/iterable feature_columns: (optional) The column name or iterable\n of column names that are the features (a.k.a. predictors) used to\n predict the targets.\n :returns: (X, Y) the feature matrix (X) and the target matrix (Y). Both are\n pandas DataFrames.\n \"\"\"\n Y = df[target_columns]\n\n if feature_columns:\n X = df[feature_columns]\n else:\n if isinstance(target_columns, str):\n xcols = [c for c in df.columns if target_columns != c]\n elif (isinstance(target_columns, list)\n or isinstance(target_columns, tuple)):\n xcols = [c for c in df.columns if c not in target_columns]\n X = df[xcols]\n return X, Y\n\n\ndef rename_column(df, old, new):\n \"\"\"\n Rename a column in place.\n\n Functional usage example:\n\n .. code-block:: python\n\n df = rename_column(\"old_column_name\", \"new_column_name\")\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).rename_column(\"old_column_name\", \"new_column_name\") # noqa: E501\n\n This is just syntactic sugar/a convenience function for renaming one column\n at a time. If you are convinced that there are multiple columns in need of\n changing, then use the :py:meth:`pandas.DataFrame.rename` method.\n\n :param str old: The old column name.\n :param str new: The new column name.\n :returns: A pandas DataFrame.\n \"\"\"\n return df.rename(columns={old: new})\n\n\ndef coalesce(df, columns, new_column_name):\n \"\"\"\n Coalesces two or more columns of data in order of column names provided.\n\n Functional usage example:\n\n .. code-block:: python\n\n df = coalesce(df, columns=['col1', 'col2'])\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).coalesce(['col1', 'col2'])\n\n\n The result of this function is that we take the first non-null value across\n rows.\n\n This is more syntactic diabetes! For R users, this should look familiar to\n `dplyr`'s `coalesce` function; for Python users, the interface\n should be more intuitive than the :py:meth:`pandas.Series.combine_first`\n method (which we're just using internally anyways).\n\n :param df: A pandas DataFrame.\n :param columns: A list of column names.\n :param str new_column_name: The new column name after combining.\n :returns: A pandas DataFrame.\n \"\"\"\n series = [df[c] for c in columns]\n\n def _coalesce(series1, series2):\n return series1.combine_first(series2)\n df = df.drop(columns=columns)\n df[new_column_name] = reduce(_coalesce, series) # noqa: F821\n return df\n\n\ndef convert_excel_date(df, column):\n \"\"\"\n Convert Excel's serial date format into Python datetime format.\n\n Implementation is also from `Stack Overflow`.\n\n .. _Stack Overflow: https://stackoverflow.com/questions/38454403/convert-excel-style-date-with-pandas # noqa: E501\n\n Functional usage example:\n\n .. code-block:: python\n\n df = convert_excel_date(df, column='date')\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).convert_excel_date('date')\n\n :param df: A pandas DataFrame.\n :param str column: A column name.\n :returns: A pandas DataFrame with corrected dates.\n \"\"\"\n df[column] = (pd.TimedeltaIndex(df[column], unit='d')\n + dt.datetime(1899, 12, 30))\n return df\n\n\ndef fill_empty(df, columns, value):\n \"\"\"\n Fill `NaN` values in specified columns with a given value.\n\n Super sugary syntax that wraps :py:meth:`pandas.DataFrame.fillna`.\n\n Functional usage example:\n\n .. code-block:: python\n\n df = fill_empty(df, columns=['col1', 'col2'], value=0)\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).fill_empty(df, columns='col1', value=0)\n\n :param df: A pandas DataFrame.\n :param columns: Either a `str` or `list` or `tuple`. If a string is passed\n in, then only that column will be filled; if a list or tuple of strings\n are passed in, then they will all be filled with the same value.\n :param value: The value that replaces the `NaN` values.\n \"\"\"\n if isinstance(columns, list) or isinstance(columns, tuple):\n for col in columns:\n assert col in df.columns, \\\n JanitorError(\"{col} missing from dataframe columns!\".format(col=col)) # noqa: E501\n df[col] = df[col].fillna(value)\n elif isinstance(columns, str):\n df[columns] = df[columns].fillna(value)\n else:\n raise JanitorError('kwarg `columns` must be a string or iterable!')\n\n return df\n", "path": "janitor/functions.py"}], "after_files": [{"content": "import datetime as dt\nfrom functools import reduce\n\nimport pandas as pd\n\nfrom .errors import JanitorError\n\nimport re\n\n\ndef _strip_underscores(df, strip_underscores=None):\n \"\"\"\n Strip underscores from the beginning, end or both of the\n of the DataFrames column names.\n\n .. code-block:: python\n\n df = _strip_underscores(df, strip_underscores='left')\n\n :param df: The pandas DataFrame object.\n :param strip_underscores: (optional) Removes the outer underscores from all\n column names. Default None keeps outer underscores. Values can be\n either 'left', 'right' or 'both' or the respective shorthand 'l', 'r'\n and True.\n :returns: A pandas DataFrame.\n \"\"\"\n underscore_options = [None, 'left', 'right', 'both', 'l', 'r', True]\n if strip_underscores not in underscore_options:\n raise JanitorError(\n \"\"\"strip_underscores must be one of: %s\"\"\" % underscore_options\n )\n\n if strip_underscores in ['left', 'l']:\n df = df.rename(columns=lambda x: x.lstrip('_'))\n elif strip_underscores in ['right', 'r']:\n df = df.rename(columns=lambda x: x.rstrip('_'))\n elif strip_underscores == 'both' or strip_underscores is True:\n df = df.rename(columns=lambda x: x.strip('_'))\n return df\n\n\ndef clean_names(df, strip_underscores=None):\n \"\"\"\n Clean column names.\n\n Takes all column names, converts them to lowercase, then replaces all\n spaces with underscores.\n\n Functional usage example:\n\n .. code-block:: python\n\n df = clean_names(df)\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).clean_names()\n\n :param df: The pandas DataFrame object.\n :param strip_underscores: (optional) Removes the outer underscores from all\n column names. Default None keeps outer underscores. Values can be\n either 'left', 'right' or 'both' or the respective shorthand 'l', 'r'\n and True.\n :returns: A pandas DataFrame.\n \"\"\"\n df = df.rename(\n columns=lambda x: x.lower()\n .replace(' ', '_')\n .replace('/', '_')\n .replace(':', '_')\n .replace(\"'\", '')\n .replace('\u2019', '')\n .replace(',', '_')\n .replace('?', '_')\n .replace('-', '_')\n .replace('(', '_')\n .replace(')', '_')\n .replace('.', '_')\n )\n\n df = df.rename(columns=lambda x: re.sub('_+', '_', x))\n df = _strip_underscores(df, strip_underscores)\n return df\n\n\ndef remove_empty(df):\n \"\"\"\n Drop all rows and columns that are completely null.\n\n Implementation is shamelessly copied from `StackOverflow`_.\n\n .. _StackOverflow: https://stackoverflow.com/questions/38884538/python-pandas-find-all-rows-where-all-values-are-nan # noqa: E501\n\n Functional usage example:\n\n .. code-block:: python\n\n df = remove_empty(df)\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).remove_empty()\n\n :param df: The pandas DataFrame object.\n :returns: A pandas DataFrame.\n \"\"\"\n\n nanrows = df.index[df.isnull().all(axis=1)]\n df.drop(index=nanrows, inplace=True)\n\n nancols = df.columns[df.isnull().all(axis=0)]\n df.drop(columns=nancols, inplace=True)\n\n return df\n\n\ndef get_dupes(df, columns=None):\n \"\"\"\n Returns all duplicate rows.\n\n Functional usage example:\n\n .. code-block:: python\n\n get_dupes(df)\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n jn.DataFrame(df).get_dupes()\n\n :param df: The pandas DataFrame object.\n :param str/iterable columns: (optional) A column name or an iterable (list\n or tuple) of column names. Following pandas API, this only considers\n certain columns for identifying duplicates. Defaults to using all\n columns.\n :returns: The duplicate rows, as a pandas DataFrame.\n \"\"\"\n dupes = df.duplicated(subset=columns, keep=False)\n return df[dupes == True] # noqa: E712\n\n\ndef encode_categorical(df, columns):\n \"\"\"\n Encode the specified columns as categorical.\n\n Functional usage example:\n\n .. code-block:: python\n\n encode_categorical(df, columns=\"my_categorical_column\") # one way\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n categorical_cols = ['col1', 'col2', 'col4']\n jn.DataFrame(df).encode_categorical(columns=categorical_cols)\n\n :param df: The pandas DataFrame object.\n :param str/iterable columns: A column name or an iterable (list or tuple)\n of column names.\n :returns: A pandas DataFrame\n \"\"\"\n if isinstance(columns, list) or isinstance(columns, tuple):\n for col in columns:\n assert col in df.columns, \\\n JanitorError(\"{col} missing from dataframe columns!\".format(col=col)) # noqa: E501\n df[col] = pd.Categorical(df[col])\n elif isinstance(columns, str):\n df[columns] = pd.Categorical(df[columns])\n else:\n raise JanitorError('kwarg `columns` must be a string or iterable!')\n return df\n\n\ndef get_features_targets(df, target_columns, feature_columns=None):\n \"\"\"\n Get the features and targets as separate DataFrames/Series.\n\n The behaviour is as such:\n\n - `target_columns` is mandatory.\n - If `feature_columns` is present, then we will respect the column names\n inside there.\n - If `feature_columns` is not passed in, then we will assume that the\n rest of the columns are feature columns, and return them.\n\n Functional usage example:\n\n .. code-block:: python\n\n X, y = get_features_targets(df, target_columns=\"measurement\")\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n target_cols = ['output1', 'output2']\n X, y = jn.DataFrame(df).get_features_targets(target_columns=target_cols) # noqa: E501\n\n :param df: The pandas DataFrame object.\n :param str/iterable target_columns: Either a column name or an iterable\n (list or tuple) of column names that are the target(s) to be predicted.\n :param str/iterable feature_columns: (optional) The column name or iterable\n of column names that are the features (a.k.a. predictors) used to\n predict the targets.\n :returns: (X, Y) the feature matrix (X) and the target matrix (Y). Both are\n pandas DataFrames.\n \"\"\"\n Y = df[target_columns]\n\n if feature_columns:\n X = df[feature_columns]\n else:\n if isinstance(target_columns, str):\n xcols = [c for c in df.columns if target_columns != c]\n elif (isinstance(target_columns, list)\n or isinstance(target_columns, tuple)):\n xcols = [c for c in df.columns if c not in target_columns]\n X = df[xcols]\n return X, Y\n\n\ndef rename_column(df, old, new):\n \"\"\"\n Rename a column in place.\n\n Functional usage example:\n\n .. code-block:: python\n\n df = rename_column(\"old_column_name\", \"new_column_name\")\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).rename_column(\"old_column_name\", \"new_column_name\") # noqa: E501\n\n This is just syntactic sugar/a convenience function for renaming one column\n at a time. If you are convinced that there are multiple columns in need of\n changing, then use the :py:meth:`pandas.DataFrame.rename` method.\n\n :param str old: The old column name.\n :param str new: The new column name.\n :returns: A pandas DataFrame.\n \"\"\"\n return df.rename(columns={old: new})\n\n\ndef coalesce(df, columns, new_column_name):\n \"\"\"\n Coalesces two or more columns of data in order of column names provided.\n\n Functional usage example:\n\n .. code-block:: python\n\n df = coalesce(df, columns=['col1', 'col2'])\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).coalesce(['col1', 'col2'])\n\n\n The result of this function is that we take the first non-null value across\n rows.\n\n This is more syntactic diabetes! For R users, this should look familiar to\n `dplyr`'s `coalesce` function; for Python users, the interface\n should be more intuitive than the :py:meth:`pandas.Series.combine_first`\n method (which we're just using internally anyways).\n\n :param df: A pandas DataFrame.\n :param columns: A list of column names.\n :param str new_column_name: The new column name after combining.\n :returns: A pandas DataFrame.\n \"\"\"\n series = [df[c] for c in columns]\n\n def _coalesce(series1, series2):\n return series1.combine_first(series2)\n df = df.drop(columns=columns)\n df[new_column_name] = reduce(_coalesce, series) # noqa: F821\n return df\n\n\ndef convert_excel_date(df, column):\n \"\"\"\n Convert Excel's serial date format into Python datetime format.\n\n Implementation is also from `Stack Overflow`.\n\n .. _Stack Overflow: https://stackoverflow.com/questions/38454403/convert-excel-style-date-with-pandas # noqa: E501\n\n Functional usage example:\n\n .. code-block:: python\n\n df = convert_excel_date(df, column='date')\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).convert_excel_date('date')\n\n :param df: A pandas DataFrame.\n :param str column: A column name.\n :returns: A pandas DataFrame with corrected dates.\n \"\"\"\n df[column] = (pd.TimedeltaIndex(df[column], unit='d')\n + dt.datetime(1899, 12, 30))\n return df\n\n\ndef fill_empty(df, columns, value):\n \"\"\"\n Fill `NaN` values in specified columns with a given value.\n\n Super sugary syntax that wraps :py:meth:`pandas.DataFrame.fillna`.\n\n Functional usage example:\n\n .. code-block:: python\n\n df = fill_empty(df, columns=['col1', 'col2'], value=0)\n\n Method chaining example:\n\n .. code-block:: python\n\n df = pd.DataFrame(...)\n df = jn.DataFrame(df).fill_empty(df, columns='col1', value=0)\n\n :param df: A pandas DataFrame.\n :param columns: Either a `str` or `list` or `tuple`. If a string is passed\n in, then only that column will be filled; if a list or tuple of strings\n are passed in, then they will all be filled with the same value.\n :param value: The value that replaces the `NaN` values.\n \"\"\"\n if isinstance(columns, list) or isinstance(columns, tuple):\n for col in columns:\n assert col in df.columns, \\\n JanitorError(\"{col} missing from dataframe columns!\".format(col=col)) # noqa: E501\n df[col] = df[col].fillna(value)\n elif isinstance(columns, str):\n df[columns] = df[columns].fillna(value)\n else:\n raise JanitorError('kwarg `columns` must be a string or iterable!')\n\n return df\n", "path": "janitor/functions.py"}]}
3,567
654
gh_patches_debug_6163
rasdani/github-patches
git_diff
ESMCI__cime-2596
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- With change in #2588, xmlquery sometimes doesn't give you an error when it should This is a minor issue, so I don't feel like it needs to be fixed for the CESM2 release, but I feel like it would eventually be good to fix this to work more robustly: With the fix that @jedwards4b put in in #2588 , a new issue appears: If you xmlquery a non-existent variable with flag `--file`, you no longer get an error: e.g.: ``` $ ./xmlquery STOP_FOO --file env_run.xml setting case file to env_run.xml $ echo $? 0 ``` (This still works fine - giving an error as expected - if you don't include the --file option.) Since specifying --file in this single-variable usage of xmlquery seems uncommon, this doesn't feel critical to fix immediately. From a quick look through the logic, I'm wondering if a better solution to #2580 would be to revert the logic on these lines to the pre-PR #2588 logic: https://github.com/ESMCI/cime/blob/6c4b1524cd0cdab8257dc98ea6c7e6024a1ac9d9/scripts/Tools/xmlquery#L255-L258 and instead adding an option to `case.get_record_fields`, as called on this line: https://github.com/ESMCI/cime/blob/6c4b1524cd0cdab8257dc98ea6c7e6024a1ac9d9/scripts/Tools/xmlquery#L286 to pass in an optional file; if specified, it would only query that file, rather than querying all files. My understanding of the current --listall logic when --file is given is that it gets variables from all files, then builds up a result list containing just those variables in the given file; with the above suggestion, the subsetting on file would happen up-front. I'm not sure if this would work right, though. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scripts/lib/CIME/XML/env_base.py` Content: ``` 1 """ 2 Base class for env files. This class inherits from EntryID.py 3 """ 4 from CIME.XML.standard_module_setup import * 5 from CIME.XML.entry_id import EntryID 6 from CIME.XML.headers import Headers 7 from CIME.utils import convert_to_type 8 logger = logging.getLogger(__name__) 9 10 class EnvBase(EntryID): 11 12 def __init__(self, case_root, infile, schema=None): 13 if case_root is None: 14 case_root = os.getcwd() 15 16 if os.path.isabs(infile): 17 fullpath = infile 18 else: 19 fullpath = os.path.join(case_root, infile) 20 21 EntryID.__init__(self, fullpath, schema=schema, read_only=False) 22 23 self._id_map = None 24 self._group_map = None 25 26 if not os.path.isfile(fullpath): 27 headerobj = Headers() 28 headernode = headerobj.get_header_node(os.path.basename(fullpath)) 29 self.add_child(headernode) 30 else: 31 self._setup_cache() 32 33 def _setup_cache(self): 34 self._id_map = {} # map id directly to nodes 35 self._group_map = {} # map group name to entry id dict 36 37 group_elems = self.get_children("group") 38 for group_elem in group_elems: 39 group_name = self.get(group_elem, "id") 40 expect(group_name not in self._group_map, "Repeat group '{}'".format(group_name)) 41 group_map = {} 42 self._group_map[group_name] = group_map 43 entry_elems = self.get_children("entry", root=group_elem) 44 for entry_elem in entry_elems: 45 entry_id = self.get(entry_elem, "id") 46 expect(entry_id not in group_map, "Repeat entry '{}' in group '{}'".format(entry_id, group_name)) 47 group_map[entry_id] = entry_elem 48 if entry_id in self._id_map: 49 self._id_map[entry_id].append(entry_elem) 50 else: 51 self._id_map[entry_id] = [entry_elem] 52 53 self.lock() 54 55 def change_file(self, newfile, copy=False): 56 self.unlock() 57 EntryID.change_file(self, newfile, copy=copy) 58 self._setup_cache() 59 60 def get_children(self, name=None, attributes=None, root=None): 61 if self.locked and name == "entry" and attributes is not None and attributes.keys() == ["id"]: 62 entry_id = attributes["id"] 63 if root is None or self.name(root) == "file": 64 if entry_id in self._id_map: 65 return self._id_map[entry_id] 66 else: 67 return [] 68 else: 69 expect(self.name(root) == "group", "Unexpected elem '{}' for {}, attrs {}".format(self.name(root), self.filename, self.attrib(root))) 70 group_id = self.get(root, "id") 71 if group_id in self._group_map and entry_id in self._group_map[group_id]: 72 return [self._group_map[group_id][entry_id]] 73 else: 74 return [] 75 76 else: 77 # Non-compliant look up 78 return EntryID.get_children(self, name=name, attributes=attributes, root=root) 79 80 def scan_children(self, nodename, attributes=None, root=None): 81 if self.locked and nodename == "entry" and attributes is not None and attributes.keys() == ["id"]: 82 return EnvBase.get_children(self, name=nodename, attributes=attributes, root=root) 83 else: 84 return EntryID.scan_children(self, nodename, attributes=attributes, root=root) 85 86 def set_components(self, components): 87 if hasattr(self, '_components'): 88 # pylint: disable=attribute-defined-outside-init 89 self._components = components 90 91 def check_if_comp_var(self, vid, attribute=None, node=None): 92 comp = None 93 if node is None: 94 nodes = self.scan_children("entry", {"id" : vid}) 95 if len(nodes): 96 node = nodes[0] 97 98 if node: 99 valnodes = self.scan_children("value", attributes={"compclass":None}, root=node) 100 if len(valnodes) == 0: 101 logger.debug("vid {} is not a compvar".format(vid)) 102 return vid, None, False 103 else: 104 logger.debug("vid {} is a compvar".format(vid)) 105 if attribute is not None: 106 comp = attribute["compclass"] 107 return vid, comp, True 108 else: 109 if hasattr(self, "_components"): 110 new_vid = None 111 for comp in self._components: 112 if vid.endswith('_'+comp): 113 new_vid = vid.replace('_'+comp, '', 1) 114 elif vid.startswith(comp+'_'): 115 new_vid = vid.replace(comp+'_', '', 1) 116 elif '_' + comp + '_' in vid: 117 new_vid = vid.replace(comp+'_','', 1) 118 if new_vid is not None: 119 break 120 if new_vid is not None: 121 logger.debug("vid {} is a compvar with comp {}".format(vid, comp)) 122 return new_vid, comp, True 123 124 return vid, None, False 125 126 def get_value(self, vid, attribute=None, resolved=True, subgroup=None): 127 """ 128 Get a value for entry with id attribute vid. 129 or from the values field if the attribute argument is provided 130 and matches 131 """ 132 value = None 133 vid, comp, iscompvar = self.check_if_comp_var(vid, attribute) 134 logger.debug("vid {} comp {} iscompvar {}".format(vid, comp, iscompvar)) 135 if iscompvar: 136 if comp is None: 137 if subgroup is not None: 138 comp = subgroup 139 else: 140 logger.debug("Not enough info to get value for {}".format(vid)) 141 return value 142 if attribute is None: 143 attribute = {"compclass" : comp} 144 else: 145 attribute["compclass"] = comp 146 node = self.scan_optional_child("entry", {"id":vid}) 147 if node is not None: 148 type_str = self._get_type_info(node) 149 values = self.get_optional_child("values", root=node) 150 node = values if values is not None else node 151 val = self.get_element_text("value", attribute, root=node) 152 if val is not None: 153 if val.startswith("$"): 154 value = val 155 else: 156 value = convert_to_type(val,type_str, vid) 157 return value 158 159 return EntryID.get_value(self, vid, attribute=attribute, resolved=resolved, subgroup=subgroup) 160 161 def set_value(self, vid, value, subgroup=None, ignore_type=False): 162 """ 163 Set the value of an entry-id field to value 164 Returns the value or None if not found 165 subgroup is ignored in the general routine and applied in specific methods 166 """ 167 vid, comp, iscompvar = self.check_if_comp_var(vid, None) 168 val = None 169 root = self.root if subgroup is None else self.get_optional_child("group", {"id":subgroup}) 170 node = self.scan_optional_child("entry", {"id":vid}, root=root) 171 if node is not None: 172 if iscompvar and comp is None: 173 # pylint: disable=no-member 174 for comp in self._components: 175 val = self._set_value(node, value, vid, subgroup, ignore_type, compclass=comp) 176 else: 177 val = self._set_value(node, value, vid, subgroup, ignore_type, compclass=comp) 178 return val 179 180 # pylint: disable=arguments-differ 181 def _set_value(self, node, value, vid=None, subgroup=None, ignore_type=False, compclass=None): 182 if vid is None: 183 vid = self.get(node, "id") 184 vid, _, iscompvar = self.check_if_comp_var(vid, node=node) 185 186 if iscompvar: 187 expect(compclass is not None, "compclass must be specified if is comp var") 188 attribute = {"compclass":compclass} 189 str_value = self.get_valid_value_string(node, value, vid, ignore_type) 190 values = self.get_optional_child("values", root=node) 191 node = values if values is not None else node 192 val = self.set_element_text("value", str_value, attribute, root=node) 193 else: 194 val = EntryID._set_value(self, node, value, vid, subgroup, ignore_type) 195 return val 196 197 def get_nodes_by_id(self, varid): 198 varid, _, _ = self.check_if_comp_var(varid, None) 199 return EntryID.get_nodes_by_id(self, varid) 200 201 def cleanupnode(self, node): 202 """ 203 Remove the <group>, <file>, <values> and <value> childnodes from node 204 """ 205 fnode = self.get_child("file", root=node) 206 self.remove_child(fnode, node) 207 gnode = self.get_child("group", root=node) 208 self.remove_child(gnode, node) 209 dnode = self.get_optional_child("default_value", root=node) 210 if dnode is not None: 211 self.remove_child(dnode, node) 212 213 vnode = self.get_optional_child("values", root=node) 214 if vnode is not None: 215 componentatt = self.get_children("value", attributes={"component":"ATM"}, root=vnode) 216 # backward compatibility (compclasses and component were mixed 217 # now we seperated into component and compclass) 218 if len(componentatt) > 0: 219 for ccnode in self.get_children("value", attributes={"component":None}, root=vnode): 220 val = self.get(ccnode, "component") 221 self.pop(ccnode, "component") 222 self.set(ccnode, "compclass", val) 223 224 compclassatt = self.get_children("value", attributes={"compclass":None}, root=vnode) 225 if len(compclassatt) == 0: 226 self.remove_child(vnode, root=node) 227 228 return node 229 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scripts/lib/CIME/XML/env_base.py b/scripts/lib/CIME/XML/env_base.py --- a/scripts/lib/CIME/XML/env_base.py +++ b/scripts/lib/CIME/XML/env_base.py @@ -106,7 +106,7 @@ comp = attribute["compclass"] return vid, comp, True else: - if hasattr(self, "_components"): + if hasattr(self, "_components") and self._components: new_vid = None for comp in self._components: if vid.endswith('_'+comp):
{"golden_diff": "diff --git a/scripts/lib/CIME/XML/env_base.py b/scripts/lib/CIME/XML/env_base.py\n--- a/scripts/lib/CIME/XML/env_base.py\n+++ b/scripts/lib/CIME/XML/env_base.py\n@@ -106,7 +106,7 @@\n comp = attribute[\"compclass\"]\n return vid, comp, True\n else:\n- if hasattr(self, \"_components\"):\n+ if hasattr(self, \"_components\") and self._components:\n new_vid = None\n for comp in self._components:\n if vid.endswith('_'+comp):\n", "issue": "With change in #2588, xmlquery sometimes doesn't give you an error when it should\nThis is a minor issue, so I don't feel like it needs to be fixed for the CESM2 release, but I feel like it would eventually be good to fix this to work more robustly:\r\n\r\nWith the fix that @jedwards4b put in in #2588 , a new issue appears: If you xmlquery a non-existent variable with flag `--file`, you no longer get an error: e.g.:\r\n\r\n```\r\n$ ./xmlquery STOP_FOO --file env_run.xml\r\nsetting case file to env_run.xml\r\n$ echo $?\r\n0\r\n```\r\n\r\n(This still works fine - giving an error as expected - if you don't include the --file option.)\r\n\r\nSince specifying --file in this single-variable usage of xmlquery seems uncommon, this doesn't feel critical to fix immediately.\r\n\r\nFrom a quick look through the logic, I'm wondering if a better solution to #2580 would be to revert the logic on these lines to the pre-PR #2588 logic:\r\n\r\nhttps://github.com/ESMCI/cime/blob/6c4b1524cd0cdab8257dc98ea6c7e6024a1ac9d9/scripts/Tools/xmlquery#L255-L258\r\n\r\nand instead adding an option to `case.get_record_fields`, as called on this line:\r\n\r\nhttps://github.com/ESMCI/cime/blob/6c4b1524cd0cdab8257dc98ea6c7e6024a1ac9d9/scripts/Tools/xmlquery#L286\r\n\r\nto pass in an optional file; if specified, it would only query that file, rather than querying all files.\r\n\r\nMy understanding of the current --listall logic when --file is given is that it gets variables from all files, then builds up a result list containing just those variables in the given file; with the above suggestion, the subsetting on file would happen up-front. I'm not sure if this would work right, though.\n", "before_files": [{"content": "\"\"\"\nBase class for env files. This class inherits from EntryID.py\n\"\"\"\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.XML.entry_id import EntryID\nfrom CIME.XML.headers import Headers\nfrom CIME.utils import convert_to_type\nlogger = logging.getLogger(__name__)\n\nclass EnvBase(EntryID):\n\n def __init__(self, case_root, infile, schema=None):\n if case_root is None:\n case_root = os.getcwd()\n\n if os.path.isabs(infile):\n fullpath = infile\n else:\n fullpath = os.path.join(case_root, infile)\n\n EntryID.__init__(self, fullpath, schema=schema, read_only=False)\n\n self._id_map = None\n self._group_map = None\n\n if not os.path.isfile(fullpath):\n headerobj = Headers()\n headernode = headerobj.get_header_node(os.path.basename(fullpath))\n self.add_child(headernode)\n else:\n self._setup_cache()\n\n def _setup_cache(self):\n self._id_map = {} # map id directly to nodes\n self._group_map = {} # map group name to entry id dict\n\n group_elems = self.get_children(\"group\")\n for group_elem in group_elems:\n group_name = self.get(group_elem, \"id\")\n expect(group_name not in self._group_map, \"Repeat group '{}'\".format(group_name))\n group_map = {}\n self._group_map[group_name] = group_map\n entry_elems = self.get_children(\"entry\", root=group_elem)\n for entry_elem in entry_elems:\n entry_id = self.get(entry_elem, \"id\")\n expect(entry_id not in group_map, \"Repeat entry '{}' in group '{}'\".format(entry_id, group_name))\n group_map[entry_id] = entry_elem\n if entry_id in self._id_map:\n self._id_map[entry_id].append(entry_elem)\n else:\n self._id_map[entry_id] = [entry_elem]\n\n self.lock()\n\n def change_file(self, newfile, copy=False):\n self.unlock()\n EntryID.change_file(self, newfile, copy=copy)\n self._setup_cache()\n\n def get_children(self, name=None, attributes=None, root=None):\n if self.locked and name == \"entry\" and attributes is not None and attributes.keys() == [\"id\"]:\n entry_id = attributes[\"id\"]\n if root is None or self.name(root) == \"file\":\n if entry_id in self._id_map:\n return self._id_map[entry_id]\n else:\n return []\n else:\n expect(self.name(root) == \"group\", \"Unexpected elem '{}' for {}, attrs {}\".format(self.name(root), self.filename, self.attrib(root)))\n group_id = self.get(root, \"id\")\n if group_id in self._group_map and entry_id in self._group_map[group_id]:\n return [self._group_map[group_id][entry_id]]\n else:\n return []\n\n else:\n # Non-compliant look up\n return EntryID.get_children(self, name=name, attributes=attributes, root=root)\n\n def scan_children(self, nodename, attributes=None, root=None):\n if self.locked and nodename == \"entry\" and attributes is not None and attributes.keys() == [\"id\"]:\n return EnvBase.get_children(self, name=nodename, attributes=attributes, root=root)\n else:\n return EntryID.scan_children(self, nodename, attributes=attributes, root=root)\n\n def set_components(self, components):\n if hasattr(self, '_components'):\n # pylint: disable=attribute-defined-outside-init\n self._components = components\n\n def check_if_comp_var(self, vid, attribute=None, node=None):\n comp = None\n if node is None:\n nodes = self.scan_children(\"entry\", {\"id\" : vid})\n if len(nodes):\n node = nodes[0]\n\n if node:\n valnodes = self.scan_children(\"value\", attributes={\"compclass\":None}, root=node)\n if len(valnodes) == 0:\n logger.debug(\"vid {} is not a compvar\".format(vid))\n return vid, None, False\n else:\n logger.debug(\"vid {} is a compvar\".format(vid))\n if attribute is not None:\n comp = attribute[\"compclass\"]\n return vid, comp, True\n else:\n if hasattr(self, \"_components\"):\n new_vid = None\n for comp in self._components:\n if vid.endswith('_'+comp):\n new_vid = vid.replace('_'+comp, '', 1)\n elif vid.startswith(comp+'_'):\n new_vid = vid.replace(comp+'_', '', 1)\n elif '_' + comp + '_' in vid:\n new_vid = vid.replace(comp+'_','', 1)\n if new_vid is not None:\n break\n if new_vid is not None:\n logger.debug(\"vid {} is a compvar with comp {}\".format(vid, comp))\n return new_vid, comp, True\n\n return vid, None, False\n\n def get_value(self, vid, attribute=None, resolved=True, subgroup=None):\n \"\"\"\n Get a value for entry with id attribute vid.\n or from the values field if the attribute argument is provided\n and matches\n \"\"\"\n value = None\n vid, comp, iscompvar = self.check_if_comp_var(vid, attribute)\n logger.debug(\"vid {} comp {} iscompvar {}\".format(vid, comp, iscompvar))\n if iscompvar:\n if comp is None:\n if subgroup is not None:\n comp = subgroup\n else:\n logger.debug(\"Not enough info to get value for {}\".format(vid))\n return value\n if attribute is None:\n attribute = {\"compclass\" : comp}\n else:\n attribute[\"compclass\"] = comp\n node = self.scan_optional_child(\"entry\", {\"id\":vid})\n if node is not None:\n type_str = self._get_type_info(node)\n values = self.get_optional_child(\"values\", root=node)\n node = values if values is not None else node\n val = self.get_element_text(\"value\", attribute, root=node)\n if val is not None:\n if val.startswith(\"$\"):\n value = val\n else:\n value = convert_to_type(val,type_str, vid)\n return value\n\n return EntryID.get_value(self, vid, attribute=attribute, resolved=resolved, subgroup=subgroup)\n\n def set_value(self, vid, value, subgroup=None, ignore_type=False):\n \"\"\"\n Set the value of an entry-id field to value\n Returns the value or None if not found\n subgroup is ignored in the general routine and applied in specific methods\n \"\"\"\n vid, comp, iscompvar = self.check_if_comp_var(vid, None)\n val = None\n root = self.root if subgroup is None else self.get_optional_child(\"group\", {\"id\":subgroup})\n node = self.scan_optional_child(\"entry\", {\"id\":vid}, root=root)\n if node is not None:\n if iscompvar and comp is None:\n # pylint: disable=no-member\n for comp in self._components:\n val = self._set_value(node, value, vid, subgroup, ignore_type, compclass=comp)\n else:\n val = self._set_value(node, value, vid, subgroup, ignore_type, compclass=comp)\n return val\n\n # pylint: disable=arguments-differ\n def _set_value(self, node, value, vid=None, subgroup=None, ignore_type=False, compclass=None):\n if vid is None:\n vid = self.get(node, \"id\")\n vid, _, iscompvar = self.check_if_comp_var(vid, node=node)\n\n if iscompvar:\n expect(compclass is not None, \"compclass must be specified if is comp var\")\n attribute = {\"compclass\":compclass}\n str_value = self.get_valid_value_string(node, value, vid, ignore_type)\n values = self.get_optional_child(\"values\", root=node)\n node = values if values is not None else node\n val = self.set_element_text(\"value\", str_value, attribute, root=node)\n else:\n val = EntryID._set_value(self, node, value, vid, subgroup, ignore_type)\n return val\n\n def get_nodes_by_id(self, varid):\n varid, _, _ = self.check_if_comp_var(varid, None)\n return EntryID.get_nodes_by_id(self, varid)\n\n def cleanupnode(self, node):\n \"\"\"\n Remove the <group>, <file>, <values> and <value> childnodes from node\n \"\"\"\n fnode = self.get_child(\"file\", root=node)\n self.remove_child(fnode, node)\n gnode = self.get_child(\"group\", root=node)\n self.remove_child(gnode, node)\n dnode = self.get_optional_child(\"default_value\", root=node)\n if dnode is not None:\n self.remove_child(dnode, node)\n\n vnode = self.get_optional_child(\"values\", root=node)\n if vnode is not None:\n componentatt = self.get_children(\"value\", attributes={\"component\":\"ATM\"}, root=vnode)\n # backward compatibility (compclasses and component were mixed\n # now we seperated into component and compclass)\n if len(componentatt) > 0:\n for ccnode in self.get_children(\"value\", attributes={\"component\":None}, root=vnode):\n val = self.get(ccnode, \"component\")\n self.pop(ccnode, \"component\")\n self.set(ccnode, \"compclass\", val)\n\n compclassatt = self.get_children(\"value\", attributes={\"compclass\":None}, root=vnode)\n if len(compclassatt) == 0:\n self.remove_child(vnode, root=node)\n\n return node\n", "path": "scripts/lib/CIME/XML/env_base.py"}], "after_files": [{"content": "\"\"\"\nBase class for env files. This class inherits from EntryID.py\n\"\"\"\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.XML.entry_id import EntryID\nfrom CIME.XML.headers import Headers\nfrom CIME.utils import convert_to_type\nlogger = logging.getLogger(__name__)\n\nclass EnvBase(EntryID):\n\n def __init__(self, case_root, infile, schema=None):\n if case_root is None:\n case_root = os.getcwd()\n\n if os.path.isabs(infile):\n fullpath = infile\n else:\n fullpath = os.path.join(case_root, infile)\n\n EntryID.__init__(self, fullpath, schema=schema, read_only=False)\n\n self._id_map = None\n self._group_map = None\n\n if not os.path.isfile(fullpath):\n headerobj = Headers()\n headernode = headerobj.get_header_node(os.path.basename(fullpath))\n self.add_child(headernode)\n else:\n self._setup_cache()\n\n def _setup_cache(self):\n self._id_map = {} # map id directly to nodes\n self._group_map = {} # map group name to entry id dict\n\n group_elems = self.get_children(\"group\")\n for group_elem in group_elems:\n group_name = self.get(group_elem, \"id\")\n expect(group_name not in self._group_map, \"Repeat group '{}'\".format(group_name))\n group_map = {}\n self._group_map[group_name] = group_map\n entry_elems = self.get_children(\"entry\", root=group_elem)\n for entry_elem in entry_elems:\n entry_id = self.get(entry_elem, \"id\")\n expect(entry_id not in group_map, \"Repeat entry '{}' in group '{}'\".format(entry_id, group_name))\n group_map[entry_id] = entry_elem\n if entry_id in self._id_map:\n self._id_map[entry_id].append(entry_elem)\n else:\n self._id_map[entry_id] = [entry_elem]\n\n self.lock()\n\n def change_file(self, newfile, copy=False):\n self.unlock()\n EntryID.change_file(self, newfile, copy=copy)\n self._setup_cache()\n\n def get_children(self, name=None, attributes=None, root=None):\n if self.locked and name == \"entry\" and attributes is not None and attributes.keys() == [\"id\"]:\n entry_id = attributes[\"id\"]\n if root is None or self.name(root) == \"file\":\n if entry_id in self._id_map:\n return self._id_map[entry_id]\n else:\n return []\n else:\n expect(self.name(root) == \"group\", \"Unexpected elem '{}' for {}, attrs {}\".format(self.name(root), self.filename, self.attrib(root)))\n group_id = self.get(root, \"id\")\n if group_id in self._group_map and entry_id in self._group_map[group_id]:\n return [self._group_map[group_id][entry_id]]\n else:\n return []\n\n else:\n # Non-compliant look up\n return EntryID.get_children(self, name=name, attributes=attributes, root=root)\n\n def scan_children(self, nodename, attributes=None, root=None):\n if self.locked and nodename == \"entry\" and attributes is not None and attributes.keys() == [\"id\"]:\n return EnvBase.get_children(self, name=nodename, attributes=attributes, root=root)\n else:\n return EntryID.scan_children(self, nodename, attributes=attributes, root=root)\n\n def set_components(self, components):\n if hasattr(self, '_components'):\n # pylint: disable=attribute-defined-outside-init\n self._components = components\n\n def check_if_comp_var(self, vid, attribute=None, node=None):\n comp = None\n if node is None:\n nodes = self.scan_children(\"entry\", {\"id\" : vid})\n if len(nodes):\n node = nodes[0]\n\n if node:\n valnodes = self.scan_children(\"value\", attributes={\"compclass\":None}, root=node)\n if len(valnodes) == 0:\n logger.debug(\"vid {} is not a compvar\".format(vid))\n return vid, None, False\n else:\n logger.debug(\"vid {} is a compvar\".format(vid))\n if attribute is not None:\n comp = attribute[\"compclass\"]\n return vid, comp, True\n else:\n if hasattr(self, \"_components\") and self._components:\n new_vid = None\n for comp in self._components:\n if vid.endswith('_'+comp):\n new_vid = vid.replace('_'+comp, '', 1)\n elif vid.startswith(comp+'_'):\n new_vid = vid.replace(comp+'_', '', 1)\n elif '_' + comp + '_' in vid:\n new_vid = vid.replace(comp+'_','', 1)\n if new_vid is not None:\n break\n if new_vid is not None:\n logger.debug(\"vid {} is a compvar with comp {}\".format(vid, comp))\n return new_vid, comp, True\n\n return vid, None, False\n\n def get_value(self, vid, attribute=None, resolved=True, subgroup=None):\n \"\"\"\n Get a value for entry with id attribute vid.\n or from the values field if the attribute argument is provided\n and matches\n \"\"\"\n value = None\n vid, comp, iscompvar = self.check_if_comp_var(vid, attribute)\n logger.debug(\"vid {} comp {} iscompvar {}\".format(vid, comp, iscompvar))\n if iscompvar:\n if comp is None:\n if subgroup is not None:\n comp = subgroup\n else:\n logger.debug(\"Not enough info to get value for {}\".format(vid))\n return value\n if attribute is None:\n attribute = {\"compclass\" : comp}\n else:\n attribute[\"compclass\"] = comp\n node = self.scan_optional_child(\"entry\", {\"id\":vid})\n if node is not None:\n type_str = self._get_type_info(node)\n values = self.get_optional_child(\"values\", root=node)\n node = values if values is not None else node\n val = self.get_element_text(\"value\", attribute, root=node)\n if val is not None:\n if val.startswith(\"$\"):\n value = val\n else:\n value = convert_to_type(val,type_str, vid)\n return value\n\n return EntryID.get_value(self, vid, attribute=attribute, resolved=resolved, subgroup=subgroup)\n\n def set_value(self, vid, value, subgroup=None, ignore_type=False):\n \"\"\"\n Set the value of an entry-id field to value\n Returns the value or None if not found\n subgroup is ignored in the general routine and applied in specific methods\n \"\"\"\n vid, comp, iscompvar = self.check_if_comp_var(vid, None)\n val = None\n root = self.root if subgroup is None else self.get_optional_child(\"group\", {\"id\":subgroup})\n node = self.scan_optional_child(\"entry\", {\"id\":vid}, root=root)\n if node is not None:\n if iscompvar and comp is None:\n # pylint: disable=no-member\n for comp in self._components:\n val = self._set_value(node, value, vid, subgroup, ignore_type, compclass=comp)\n else:\n val = self._set_value(node, value, vid, subgroup, ignore_type, compclass=comp)\n return val\n\n # pylint: disable=arguments-differ\n def _set_value(self, node, value, vid=None, subgroup=None, ignore_type=False, compclass=None):\n if vid is None:\n vid = self.get(node, \"id\")\n vid, _, iscompvar = self.check_if_comp_var(vid, node=node)\n\n if iscompvar:\n expect(compclass is not None, \"compclass must be specified if is comp var\")\n attribute = {\"compclass\":compclass}\n str_value = self.get_valid_value_string(node, value, vid, ignore_type)\n values = self.get_optional_child(\"values\", root=node)\n node = values if values is not None else node\n val = self.set_element_text(\"value\", str_value, attribute, root=node)\n else:\n val = EntryID._set_value(self, node, value, vid, subgroup, ignore_type)\n return val\n\n def get_nodes_by_id(self, varid):\n varid, _, _ = self.check_if_comp_var(varid, None)\n return EntryID.get_nodes_by_id(self, varid)\n\n def cleanupnode(self, node):\n \"\"\"\n Remove the <group>, <file>, <values> and <value> childnodes from node\n \"\"\"\n fnode = self.get_child(\"file\", root=node)\n self.remove_child(fnode, node)\n gnode = self.get_child(\"group\", root=node)\n self.remove_child(gnode, node)\n dnode = self.get_optional_child(\"default_value\", root=node)\n if dnode is not None:\n self.remove_child(dnode, node)\n\n vnode = self.get_optional_child(\"values\", root=node)\n if vnode is not None:\n componentatt = self.get_children(\"value\", attributes={\"component\":\"ATM\"}, root=vnode)\n # backward compatibility (compclasses and component were mixed\n # now we seperated into component and compclass)\n if len(componentatt) > 0:\n for ccnode in self.get_children(\"value\", attributes={\"component\":None}, root=vnode):\n val = self.get(ccnode, \"component\")\n self.pop(ccnode, \"component\")\n self.set(ccnode, \"compclass\", val)\n\n compclassatt = self.get_children(\"value\", attributes={\"compclass\":None}, root=vnode)\n if len(compclassatt) == 0:\n self.remove_child(vnode, root=node)\n\n return node\n", "path": "scripts/lib/CIME/XML/env_base.py"}]}
3,428
122
gh_patches_debug_23649
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-5403
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- enterprise car hire spider missing ~6k outlets In the latest run of the enterprise.py spider from 2023-05-15, there are around 6000 fewer results than in the previous run from 2023-04-15. I think the cause is that the search API that ATP uses is no longer returning results for some major countries. For example, the following still work: * https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/IT * https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/BE * https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/MX but the following do not: * https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/GB * https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/US * https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/FR I'm not sure why the API has stopped returning for certain countries, or if there's anything we can do about it. If we want to look at alternative means, then it's possible that the "Airport Locations" and "Neighbourhood Locations" lists at https://www.enterprise.co.uk/en/car-hire/locations/uk.html are complete for GB. (Together they have 427 locations, compared with the 425 I extracted for my Chain Reaction tool from a previous ATP dataset.) But then you'd need to parse each branch page, and that's a lot more HTTP fetches. I haven't looked what other localised sites do in terms of location lists. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `locations/spiders/enterprise.py` Content: ``` 1 import geonamescache 2 from scrapy import Spider 3 from scrapy.http import JsonRequest 4 5 from locations.dict_parser import DictParser 6 7 8 class EnterpriseSpider(Spider): 9 name = "enterprise" 10 item_attributes = {"brand": "Enterprise Rent-A-Car", "brand_wikidata": "Q17085454"} 11 allowed_domains = ["prd.location.enterprise.com"] 12 13 def start_requests(self): 14 gc = geonamescache.GeonamesCache() 15 countries = gc.get_countries() 16 for country_code in countries.keys(): 17 yield JsonRequest( 18 url=f"https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/{country_code}" 19 ) 20 21 def parse(self, response): 22 for location in response.json(): 23 if location["closed"] or not location["physicalLocation"]: 24 continue 25 item = DictParser.parse(location) 26 item["ref"] = location["stationId"] 27 item["name"] = location["locationNameTranslation"] 28 item["street_address"] = ", ".join(filter(None, location["addressLines"])) 29 item["phone"] = location["formattedPhone"] 30 yield item 31 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/locations/spiders/enterprise.py b/locations/spiders/enterprise.py --- a/locations/spiders/enterprise.py +++ b/locations/spiders/enterprise.py @@ -8,15 +8,20 @@ class EnterpriseSpider(Spider): name = "enterprise" item_attributes = {"brand": "Enterprise Rent-A-Car", "brand_wikidata": "Q17085454"} - allowed_domains = ["prd.location.enterprise.com"] + allowed_domains = ["prd.location.enterprise.com", "int1.location.enterprise.com"] def start_requests(self): gc = geonamescache.GeonamesCache() countries = gc.get_countries() for country_code in countries.keys(): - yield JsonRequest( - url=f"https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/{country_code}" - ) + # It appears that countries are sharded between two + # servers. Other servers are int2, xqa1, xqa2, xqa3 + # but search of these servers reveals no additional + # locations on top of just prd and int1. + for subdomain in ["prd", "int1"]: + yield JsonRequest( + url=f"https://{subdomain}.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/{country_code}" + ) def parse(self, response): for location in response.json():
{"golden_diff": "diff --git a/locations/spiders/enterprise.py b/locations/spiders/enterprise.py\n--- a/locations/spiders/enterprise.py\n+++ b/locations/spiders/enterprise.py\n@@ -8,15 +8,20 @@\n class EnterpriseSpider(Spider):\n name = \"enterprise\"\n item_attributes = {\"brand\": \"Enterprise Rent-A-Car\", \"brand_wikidata\": \"Q17085454\"}\n- allowed_domains = [\"prd.location.enterprise.com\"]\n+ allowed_domains = [\"prd.location.enterprise.com\", \"int1.location.enterprise.com\"]\n \n def start_requests(self):\n gc = geonamescache.GeonamesCache()\n countries = gc.get_countries()\n for country_code in countries.keys():\n- yield JsonRequest(\n- url=f\"https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/{country_code}\"\n- )\n+ # It appears that countries are sharded between two\n+ # servers. Other servers are int2, xqa1, xqa2, xqa3\n+ # but search of these servers reveals no additional\n+ # locations on top of just prd and int1.\n+ for subdomain in [\"prd\", \"int1\"]:\n+ yield JsonRequest(\n+ url=f\"https://{subdomain}.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/{country_code}\"\n+ )\n \n def parse(self, response):\n for location in response.json():\n", "issue": "enterprise car hire spider missing ~6k outlets\nIn the latest run of the enterprise.py spider from 2023-05-15, there are around 6000 fewer results than in the previous run from 2023-04-15.\r\n\r\nI think the cause is that the search API that ATP uses is no longer returning results for some major countries. For example, the following still work:\r\n* https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/IT\r\n* https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/BE\r\n* https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/MX\r\n\r\nbut the following do not:\r\n* https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/GB\r\n* https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/US\r\n* https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/FR\r\n\r\nI'm not sure why the API has stopped returning for certain countries, or if there's anything we can do about it.\r\n\r\nIf we want to look at alternative means, then it's possible that the \"Airport Locations\" and \"Neighbourhood Locations\" lists at https://www.enterprise.co.uk/en/car-hire/locations/uk.html are complete for GB. (Together they have 427 locations, compared with the 425 I extracted for my Chain Reaction tool from a previous ATP dataset.) But then you'd need to parse each branch page, and that's a lot more HTTP fetches. I haven't looked what other localised sites do in terms of location lists.\n", "before_files": [{"content": "import geonamescache\nfrom scrapy import Spider\nfrom scrapy.http import JsonRequest\n\nfrom locations.dict_parser import DictParser\n\n\nclass EnterpriseSpider(Spider):\n name = \"enterprise\"\n item_attributes = {\"brand\": \"Enterprise Rent-A-Car\", \"brand_wikidata\": \"Q17085454\"}\n allowed_domains = [\"prd.location.enterprise.com\"]\n\n def start_requests(self):\n gc = geonamescache.GeonamesCache()\n countries = gc.get_countries()\n for country_code in countries.keys():\n yield JsonRequest(\n url=f\"https://prd.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/{country_code}\"\n )\n\n def parse(self, response):\n for location in response.json():\n if location[\"closed\"] or not location[\"physicalLocation\"]:\n continue\n item = DictParser.parse(location)\n item[\"ref\"] = location[\"stationId\"]\n item[\"name\"] = location[\"locationNameTranslation\"]\n item[\"street_address\"] = \", \".join(filter(None, location[\"addressLines\"]))\n item[\"phone\"] = location[\"formattedPhone\"]\n yield item\n", "path": "locations/spiders/enterprise.py"}], "after_files": [{"content": "import geonamescache\nfrom scrapy import Spider\nfrom scrapy.http import JsonRequest\n\nfrom locations.dict_parser import DictParser\n\n\nclass EnterpriseSpider(Spider):\n name = \"enterprise\"\n item_attributes = {\"brand\": \"Enterprise Rent-A-Car\", \"brand_wikidata\": \"Q17085454\"}\n allowed_domains = [\"prd.location.enterprise.com\", \"int1.location.enterprise.com\"]\n\n def start_requests(self):\n gc = geonamescache.GeonamesCache()\n countries = gc.get_countries()\n for country_code in countries.keys():\n # It appears that countries are sharded between two\n # servers. Other servers are int2, xqa1, xqa2, xqa3\n # but search of these servers reveals no additional\n # locations on top of just prd and int1.\n for subdomain in [\"prd\", \"int1\"]:\n yield JsonRequest(\n url=f\"https://{subdomain}.location.enterprise.com/enterprise-sls/search/location/enterprise/web/country/{country_code}\"\n )\n\n def parse(self, response):\n for location in response.json():\n if location[\"closed\"] or not location[\"physicalLocation\"]:\n continue\n item = DictParser.parse(location)\n item[\"ref\"] = location[\"stationId\"]\n item[\"name\"] = location[\"locationNameTranslation\"]\n item[\"street_address\"] = \", \".join(filter(None, location[\"addressLines\"]))\n item[\"phone\"] = location[\"formattedPhone\"]\n yield item\n", "path": "locations/spiders/enterprise.py"}]}
916
317
gh_patches_debug_36253
rasdani/github-patches
git_diff
bokeh__bokeh-4723
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Warn about version mismatches I found myself chasing ghosts in Bokeh this afternoon when I accidentally inlined the wrong JS version into our web application. Perhaps Bokeh could try and detect when there are incompatible versions across the Python/JS interface? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scripts/version_update.py` Content: ``` 1 import os 2 import re 3 import sys 4 5 6 def check_input(new_ver): 7 """ Ensure that user input matches the format X.X.X """ 8 9 pat = r'\d+.\d+.\d+' 10 if not re.match(pat, new_ver): 11 print("The new version must be in the format X.X.X (ex. '0.6.0')") 12 return True 13 14 15 def version_update(new_ver, file_array): 16 """ Replace existing version/release number in an array of files 17 with a user-supplied version number (new_ver)""" 18 19 pat = r"""(release|version)([\" ][:=] [\"\'])(\d+.\d+.\d+)([\"\'])""" 20 21 # List that will contain any files where the version number was successfully replaced 22 replaced = [] 23 24 # Set as false until a match is found and replaced in the loop below 25 early_ver = False 26 27 for ver_file in file_array: 28 f = open(ver_file) 29 text = f.read() 30 matchObj = re.search(pat, text) 31 f.close() 32 33 if matchObj: 34 early_ver = matchObj.group(3) 35 f = open(ver_file, 'w') 36 text = re.sub(pat, r'\g<1>\g<2>%s\g<4>' % new_ver, text) 37 f.write(text) 38 replaced.append(ver_file) 39 else: 40 print("Unable to find version number matching expected format 'X.X.X' in %s" % ver_file) 41 42 if early_ver: 43 print("Version number changed from %s to %s in \n%s" % (early_ver, new_ver, replaced)) 44 45 def version_add(old_ver, file_array): 46 """Add last version number in an array of files 47 with a user-supplied last version number""" 48 for ver_file in file_array: 49 with open(ver_file, "r") as f: 50 flines = f.readlines() 51 for i, line in enumerate(flines): 52 if "ALL_VERSIONS" in line: 53 all_vers = flines[i] 54 begin, end = all_vers.split("[") 55 all_vers = begin + "['{}', ".format(old_ver) + end 56 flines[i] = all_vers 57 with open(ver_file, "w") as f: 58 f.writelines(flines) 59 print("Version number {old_ver} added in {ver_file}".format(old_ver=old_ver, ver_file=ver_file)) 60 61 if __name__ == '__main__': 62 if not len(sys.argv) == 3: 63 print("Please provide the new version number and the previous one.") 64 sys.exit(1) 65 66 os.chdir('../') 67 68 files_to_update = ['bokehjs/src/coffee/main.coffee', 'bokehjs/package.json'] 69 files_to_add = ['sphinx/source/conf.py'] 70 updated_version = sys.argv[1] 71 last_version = sys.argv[2] 72 73 if check_input(updated_version): 74 sys.exit(1) 75 76 version_update(updated_version, files_to_update) 77 version_add(last_version, files_to_add) 78 ``` Path: `sphinx/source/conf.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 from __future__ import unicode_literals 3 import os 4 5 # 6 # Bokeh documentation build configuration file, created by 7 # sphinx-quickstart on Sat Oct 12 23:43:03 2013. 8 # 9 # This file is execfile()d with the current directory set to its containing dir. 10 # 11 # Note that not all possible configuration values are present in this 12 # autogenerated file. 13 # 14 # All configuration values have a default; values that are commented out 15 # serve to show the default. 16 17 # If extensions (or modules to document with autodoc) are in another directory, 18 # add these directories to sys.path here. If the directory is relative to the 19 # documentation root, use os.path.abspath to make it absolute, like shown here. 20 #sys.path.insert(0, os.path.abspath('.')) 21 22 # -- General configuration ----------------------------------------------------- 23 24 # If your documentation needs a minimal Sphinx version, state it here. 25 #needs_sphinx = '1.0' 26 27 # Add any Sphinx extension module names here, as strings. They can be extensions 28 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 29 extensions = [ 30 'sphinx.ext.autodoc', 31 'sphinx.ext.autosummary', 32 'sphinx.ext.graphviz', 33 'sphinx.ext.ifconfig', 34 'sphinx.ext.inheritance_diagram', 35 'sphinx.ext.napoleon', 36 'sphinx.ext.intersphinx', 37 'bokeh.sphinxext.bokeh_autodoc', 38 'bokeh.sphinxext.bokeh_gallery', 39 'bokeh.sphinxext.bokeh_github', 40 'bokeh.sphinxext.bokeh_jinja', 41 'bokeh.sphinxext.bokeh_model', 42 'bokeh.sphinxext.bokeh_palette', 43 'bokeh.sphinxext.bokeh_plot', 44 'bokeh.sphinxext.bokeh_prop', 45 'bokeh.sphinxext.bokeh_sitemap', 46 'bokeh.sphinxext.collapsible_code_block', 47 ] 48 49 # Add any paths that contain templates here, relative to this directory. 50 templates_path = ['_templates'] 51 52 # The suffix of source filenames. 53 source_suffix = '.rst' 54 55 # The encoding of source files. 56 #source_encoding = 'utf-8-sig' 57 58 # The master toctree document. 59 master_doc = 'index' 60 61 # General information about the project. 62 project = 'Bokeh' 63 copyright = '© Copyright 2015, Continuum Analytics.' 64 65 # Get the standard computed Bokeh version string to use for |version| 66 # and |release| 67 from bokeh import __version__ 68 69 # The short X.Y version. 70 version = __version__ 71 # The full version, including alpha/beta/rc tags. 72 release = __version__ 73 74 # Check for version override (e.g. when re-deploying a previously released 75 # docs, or when pushing test docs that do not have a corresponding BokehJS 76 # available on CDN) 77 from bokeh.settings import settings 78 if settings.docs_version(): 79 version = release = settings.docs_version() 80 81 # The language for content autogenerated by Sphinx. Refer to documentation 82 # for a list of supported languages. 83 #language = None 84 85 # There are two options for replacing |today|: either, you set today to some 86 # non-false value, then it is used: 87 #today = '' 88 # Else, today_fmt is used as the format for a strftime call. 89 #today_fmt = '%B %d, %Y' 90 91 # List of patterns, relative to source directory, that match files and 92 # directories to ignore when looking for source files. 93 exclude_patterns = ['_build'] 94 95 # The reST default role (used for this markup: `text`) to use for all documents. 96 #default_role = None 97 98 # If true, '()' will be appended to :func: etc. cross-reference text. 99 #add_function_parentheses = True 100 101 # If true, the current module name will be prepended to all description 102 # unit titles (such as .. function::). 103 add_module_names = False 104 105 # If true, sectionauthor and moduleauthor directives will be shown in the 106 # output. They are ignored by default. 107 #show_authors = False 108 109 # The name of the Pygments (syntax highlighting) style to use. 110 pygments_style = 'sphinx' 111 112 # A list of ignored prefixes for module index sorting. 113 #modindex_common_prefix = [] 114 115 # Sort members by type 116 autodoc_member_order = 'groupwise' 117 118 # -- Options for HTML output --------------------------------------------------- 119 120 # The theme to use for HTML and HTML Help pages. See the documentation for 121 # a list of builtin themes. 122 html_theme = 'bokeh_theme' 123 html_theme_path = ['.'] 124 MAIN_SITE = '//bokehplots.com' 125 126 html_context = { 127 'SITEMAP_BASE_URL': 'http://bokeh.pydata.org/en/', # Trailing slash is needed 128 'SITENAME': 'Bokeh Docs', 129 'DESCRIPTION': 'Bokeh visualization library, documentation site.', 130 'AUTHOR': 'Bokeh contributors', 131 'VERSION': version, 132 # Nav 133 'NAV': ( 134 ('About', MAIN_SITE + '/pages/about-bokeh.html'), 135 ('Gallery', '/docs/gallery.html'), 136 ('Docs', '//bokeh.pydata.org/en/latest/'), 137 ('Github', '//github.com/bokeh/bokeh'), 138 ), 139 # Links 140 'LINKS': ( 141 ('FAQs', MAIN_SITE + '/pages/faqs.html'), 142 ('Technical vision', MAIN_SITE + '/pages/technical-vision.html'), 143 ('Roadmap', MAIN_SITE + '/pages/roadmap.html'), 144 ('Citation', MAIN_SITE + '/pages/citation.html'), 145 ), 146 # About Links 147 'ABOUT': ( 148 ('About', MAIN_SITE + '/pages/about-bokeh.html'), 149 ('Team', MAIN_SITE + '/pages/team.html'), 150 ('Contact', MAIN_SITE + '/pages/contact.html'), 151 ), 152 # Social links 153 'SOCIAL': ( 154 ('Contribute', MAIN_SITE + '/pages/contribute.html'), 155 ('Mailing list', '//groups.google.com/a/continuum.io/forum/#!forum/bokeh'), 156 ('Github', '//github.com/bokeh/bokeh'), 157 ('Twitter', '//twitter.com/BokehPlots'), 158 ('YouTube', '//www.youtube.com/channel/UCK0rSk29mmg4UT4bIOvPYhw') 159 ), 160 # Links for the docs sub navigation 161 'NAV_DOCS': ( 162 ('Installation', 'installation'), 163 ('User Guide', 'user_guide'), 164 ('Gallery', 'gallery'), 165 ('Reference', 'reference'), 166 ('Releases', 'releases/%s' % version), 167 ('Developer Guide', 'dev_guide'), 168 ), 169 'ALL_VERSIONS': ['0.12.0', '0.11.1', '0.11.0', '0.10.0', '0.9.3', '0.8.2'], 170 'css_server': os.environ.get('BOKEH_DOCS_CSS_SERVER', 'bokehplots.com'), 171 } 172 173 # If true, links to the reST sources are added to the pages. 174 html_show_sourcelink = True 175 176 # Output file base name for HTML help builder. 177 htmlhelp_basename = 'Bokehdoc' 178 179 # -- Options for LaTeX output -------------------------------------------------- 180 181 latex_elements = { 182 # The paper size ('letterpaper' or 'a4paper'). 183 #'papersize': 'letterpaper', 184 185 # The font size ('10pt', '11pt' or '12pt'). 186 #'pointsize': '10pt', 187 188 # Additional stuff for the LaTeX preamble. 189 #'preamble': '', 190 } 191 192 # Grouping the document tree into LaTeX files. List of tuples 193 # (source start file, target name, title, author, documentclass [howto/manual]). 194 latex_documents = [ 195 ('index', 'Bokeh.tex', u'Bokeh Documentation', u'Continuum Analytics', 'manual'), 196 ] 197 198 # The name of an image file (relative to this directory) to place at the top of 199 # the title page. 200 #latex_logo = None 201 202 # For "manual" documents, if this is true, then toplevel headings are parts, 203 # not chapters. 204 #latex_use_parts = False 205 206 # If true, show page references after internal links. 207 #latex_show_pagerefs = False 208 209 # If true, show URL addresses after external links. 210 #latex_show_urls = False 211 212 # Documents to append as an appendix to all manuals. 213 #latex_appendices = [] 214 215 # If false, no module index is generated. 216 #latex_domain_indices = True 217 218 219 # -- Options for manual page output -------------------------------------------- 220 221 # One entry per manual page. List of tuples 222 # (source start file, name, description, authors, manual section). 223 man_pages = [ 224 ('index', 'bokeh', u'Bokeh Documentation', 225 [u'Continuum Analytics'], 1) 226 ] 227 228 # If true, show URL addresses after external links. 229 #man_show_urls = False 230 231 232 # -- Options for Texinfo output ------------------------------------------------ 233 234 # Grouping the document tree into Texinfo files. List of tuples 235 # (source start file, target name, title, author, 236 # dir menu entry, description, category) 237 texinfo_documents = [ 238 ('index', 'Bokeh', u'Bokeh Documentation', u'Continuum Analytics', 'Bokeh', 'Interactive Web Plotting for Python', 'Graphics'), 239 ] 240 241 # Documents to append as an appendix to all manuals. 242 #texinfo_appendices = [] 243 244 # If false, no module index is generated. 245 #texinfo_domain_indices = True 246 247 # How to display URL addresses: 'footnote', 'no', or 'inline'. 248 #texinfo_show_urls = 'footnote' 249 250 # intersphinx settings 251 intersphinx_mapping = { 252 'python': ('https://docs.python.org/', None), 253 'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None), 254 'numpy': ('http://docs.scipy.org/doc/numpy/', None) 255 } 256 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scripts/version_update.py b/scripts/version_update.py --- a/scripts/version_update.py +++ b/scripts/version_update.py @@ -42,7 +42,7 @@ if early_ver: print("Version number changed from %s to %s in \n%s" % (early_ver, new_ver, replaced)) -def version_add(old_ver, file_array): +def version_add(new_ver, file_array): """Add last version number in an array of files with a user-supplied last version number""" for ver_file in file_array: @@ -52,11 +52,11 @@ if "ALL_VERSIONS" in line: all_vers = flines[i] begin, end = all_vers.split("[") - all_vers = begin + "['{}', ".format(old_ver) + end + all_vers = begin + "['{}', ".format(new_ver) + end flines[i] = all_vers with open(ver_file, "w") as f: f.writelines(flines) - print("Version number {old_ver} added in {ver_file}".format(old_ver=old_ver, ver_file=ver_file)) + print("Version number {new_ver} added in {ver_file}".format(new_ver=new_ver, ver_file=ver_file)) if __name__ == '__main__': if not len(sys.argv) == 3: @@ -65,7 +65,7 @@ os.chdir('../') - files_to_update = ['bokehjs/src/coffee/main.coffee', 'bokehjs/package.json'] + files_to_update = ['bokehjs/src/coffee/version.coffee', 'bokehjs/package.json'] files_to_add = ['sphinx/source/conf.py'] updated_version = sys.argv[1] last_version = sys.argv[2] @@ -74,4 +74,4 @@ sys.exit(1) version_update(updated_version, files_to_update) - version_add(last_version, files_to_add) + version_add(updated_version, files_to_add) diff --git a/sphinx/source/conf.py b/sphinx/source/conf.py --- a/sphinx/source/conf.py +++ b/sphinx/source/conf.py @@ -166,7 +166,7 @@ ('Releases', 'releases/%s' % version), ('Developer Guide', 'dev_guide'), ), - 'ALL_VERSIONS': ['0.12.0', '0.11.1', '0.11.0', '0.10.0', '0.9.3', '0.8.2'], + 'ALL_VERSIONS': ['0.12.0', '0.11.0', '0.10.0', '0.9.3', '0.8.2'], 'css_server': os.environ.get('BOKEH_DOCS_CSS_SERVER', 'bokehplots.com'), }
{"golden_diff": "diff --git a/scripts/version_update.py b/scripts/version_update.py\n--- a/scripts/version_update.py\n+++ b/scripts/version_update.py\n@@ -42,7 +42,7 @@\n if early_ver:\n print(\"Version number changed from %s to %s in \\n%s\" % (early_ver, new_ver, replaced))\n \n-def version_add(old_ver, file_array):\n+def version_add(new_ver, file_array):\n \"\"\"Add last version number in an array of files\n with a user-supplied last version number\"\"\"\n for ver_file in file_array:\n@@ -52,11 +52,11 @@\n if \"ALL_VERSIONS\" in line:\n all_vers = flines[i]\n begin, end = all_vers.split(\"[\")\n- all_vers = begin + \"['{}', \".format(old_ver) + end\n+ all_vers = begin + \"['{}', \".format(new_ver) + end\n flines[i] = all_vers\n with open(ver_file, \"w\") as f:\n f.writelines(flines)\n- print(\"Version number {old_ver} added in {ver_file}\".format(old_ver=old_ver, ver_file=ver_file))\n+ print(\"Version number {new_ver} added in {ver_file}\".format(new_ver=new_ver, ver_file=ver_file))\n \n if __name__ == '__main__':\n if not len(sys.argv) == 3:\n@@ -65,7 +65,7 @@\n \n os.chdir('../')\n \n- files_to_update = ['bokehjs/src/coffee/main.coffee', 'bokehjs/package.json']\n+ files_to_update = ['bokehjs/src/coffee/version.coffee', 'bokehjs/package.json']\n files_to_add = ['sphinx/source/conf.py']\n updated_version = sys.argv[1]\n last_version = sys.argv[2]\n@@ -74,4 +74,4 @@\n sys.exit(1)\n \n version_update(updated_version, files_to_update)\n- version_add(last_version, files_to_add)\n+ version_add(updated_version, files_to_add)\ndiff --git a/sphinx/source/conf.py b/sphinx/source/conf.py\n--- a/sphinx/source/conf.py\n+++ b/sphinx/source/conf.py\n@@ -166,7 +166,7 @@\n ('Releases', 'releases/%s' % version),\n ('Developer Guide', 'dev_guide'),\n ),\n- 'ALL_VERSIONS': ['0.12.0', '0.11.1', '0.11.0', '0.10.0', '0.9.3', '0.8.2'],\n+ 'ALL_VERSIONS': ['0.12.0', '0.11.0', '0.10.0', '0.9.3', '0.8.2'],\n 'css_server': os.environ.get('BOKEH_DOCS_CSS_SERVER', 'bokehplots.com'),\n }\n", "issue": "Warn about version mismatches\nI found myself chasing ghosts in Bokeh this afternoon when I accidentally inlined the wrong JS version into our web application. Perhaps Bokeh could try and detect when there are incompatible versions across the Python/JS interface?\n\n", "before_files": [{"content": "import os\nimport re\nimport sys\n\n\ndef check_input(new_ver):\n \"\"\" Ensure that user input matches the format X.X.X \"\"\"\n\n pat = r'\\d+.\\d+.\\d+'\n if not re.match(pat, new_ver):\n print(\"The new version must be in the format X.X.X (ex. '0.6.0')\")\n return True\n\n\ndef version_update(new_ver, file_array):\n \"\"\" Replace existing version/release number in an array of files\n with a user-supplied version number (new_ver)\"\"\"\n\n pat = r\"\"\"(release|version)([\\\" ][:=] [\\\"\\'])(\\d+.\\d+.\\d+)([\\\"\\'])\"\"\"\n\n # List that will contain any files where the version number was successfully replaced\n replaced = []\n\n # Set as false until a match is found and replaced in the loop below\n early_ver = False\n\n for ver_file in file_array:\n f = open(ver_file)\n text = f.read()\n matchObj = re.search(pat, text)\n f.close()\n\n if matchObj:\n early_ver = matchObj.group(3)\n f = open(ver_file, 'w')\n text = re.sub(pat, r'\\g<1>\\g<2>%s\\g<4>' % new_ver, text)\n f.write(text)\n replaced.append(ver_file)\n else:\n print(\"Unable to find version number matching expected format 'X.X.X' in %s\" % ver_file)\n\n if early_ver:\n print(\"Version number changed from %s to %s in \\n%s\" % (early_ver, new_ver, replaced))\n\ndef version_add(old_ver, file_array):\n \"\"\"Add last version number in an array of files\n with a user-supplied last version number\"\"\"\n for ver_file in file_array:\n with open(ver_file, \"r\") as f:\n flines = f.readlines()\n for i, line in enumerate(flines):\n if \"ALL_VERSIONS\" in line:\n all_vers = flines[i]\n begin, end = all_vers.split(\"[\")\n all_vers = begin + \"['{}', \".format(old_ver) + end\n flines[i] = all_vers\n with open(ver_file, \"w\") as f:\n f.writelines(flines)\n print(\"Version number {old_ver} added in {ver_file}\".format(old_ver=old_ver, ver_file=ver_file))\n\nif __name__ == '__main__':\n if not len(sys.argv) == 3:\n print(\"Please provide the new version number and the previous one.\")\n sys.exit(1)\n\n os.chdir('../')\n\n files_to_update = ['bokehjs/src/coffee/main.coffee', 'bokehjs/package.json']\n files_to_add = ['sphinx/source/conf.py']\n updated_version = sys.argv[1]\n last_version = sys.argv[2]\n\n if check_input(updated_version):\n sys.exit(1)\n\n version_update(updated_version, files_to_update)\n version_add(last_version, files_to_add)\n", "path": "scripts/version_update.py"}, {"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport os\n\n#\n# Bokeh documentation build configuration file, created by\n# sphinx-quickstart on Sat Oct 12 23:43:03 2013.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.graphviz',\n 'sphinx.ext.ifconfig',\n 'sphinx.ext.inheritance_diagram',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.intersphinx',\n 'bokeh.sphinxext.bokeh_autodoc',\n 'bokeh.sphinxext.bokeh_gallery',\n 'bokeh.sphinxext.bokeh_github',\n 'bokeh.sphinxext.bokeh_jinja',\n 'bokeh.sphinxext.bokeh_model',\n 'bokeh.sphinxext.bokeh_palette',\n 'bokeh.sphinxext.bokeh_plot',\n 'bokeh.sphinxext.bokeh_prop',\n 'bokeh.sphinxext.bokeh_sitemap',\n 'bokeh.sphinxext.collapsible_code_block',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Bokeh'\ncopyright = '\u00a9 Copyright 2015, Continuum Analytics.'\n\n# Get the standard computed Bokeh version string to use for |version|\n# and |release|\nfrom bokeh import __version__\n\n# The short X.Y version.\nversion = __version__\n# The full version, including alpha/beta/rc tags.\nrelease = __version__\n\n# Check for version override (e.g. when re-deploying a previously released\n# docs, or when pushing test docs that do not have a corresponding BokehJS\n# available on CDN)\nfrom bokeh.settings import settings\nif settings.docs_version():\n version = release = settings.docs_version()\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\nadd_module_names = False\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# Sort members by type\nautodoc_member_order = 'groupwise'\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'bokeh_theme'\nhtml_theme_path = ['.']\nMAIN_SITE = '//bokehplots.com'\n\nhtml_context = {\n 'SITEMAP_BASE_URL': 'http://bokeh.pydata.org/en/', # Trailing slash is needed\n 'SITENAME': 'Bokeh Docs',\n 'DESCRIPTION': 'Bokeh visualization library, documentation site.',\n 'AUTHOR': 'Bokeh contributors',\n 'VERSION': version,\n # Nav\n 'NAV': (\n ('About', MAIN_SITE + '/pages/about-bokeh.html'),\n ('Gallery', '/docs/gallery.html'),\n ('Docs', '//bokeh.pydata.org/en/latest/'),\n ('Github', '//github.com/bokeh/bokeh'),\n ),\n # Links\n 'LINKS': (\n ('FAQs', MAIN_SITE + '/pages/faqs.html'),\n ('Technical vision', MAIN_SITE + '/pages/technical-vision.html'),\n ('Roadmap', MAIN_SITE + '/pages/roadmap.html'),\n ('Citation', MAIN_SITE + '/pages/citation.html'),\n ),\n # About Links\n 'ABOUT': (\n ('About', MAIN_SITE + '/pages/about-bokeh.html'),\n ('Team', MAIN_SITE + '/pages/team.html'),\n ('Contact', MAIN_SITE + '/pages/contact.html'),\n ),\n # Social links\n 'SOCIAL': (\n ('Contribute', MAIN_SITE + '/pages/contribute.html'),\n ('Mailing list', '//groups.google.com/a/continuum.io/forum/#!forum/bokeh'),\n ('Github', '//github.com/bokeh/bokeh'),\n ('Twitter', '//twitter.com/BokehPlots'),\n ('YouTube', '//www.youtube.com/channel/UCK0rSk29mmg4UT4bIOvPYhw')\n ),\n # Links for the docs sub navigation\n 'NAV_DOCS': (\n ('Installation', 'installation'),\n ('User Guide', 'user_guide'),\n ('Gallery', 'gallery'),\n ('Reference', 'reference'),\n ('Releases', 'releases/%s' % version),\n ('Developer Guide', 'dev_guide'),\n ),\n 'ALL_VERSIONS': ['0.12.0', '0.11.1', '0.11.0', '0.10.0', '0.9.3', '0.8.2'],\n 'css_server': os.environ.get('BOKEH_DOCS_CSS_SERVER', 'bokehplots.com'),\n}\n\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = True\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Bokehdoc'\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'Bokeh.tex', u'Bokeh Documentation', u'Continuum Analytics', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'bokeh', u'Bokeh Documentation',\n [u'Continuum Analytics'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'Bokeh', u'Bokeh Documentation', u'Continuum Analytics', 'Bokeh', 'Interactive Web Plotting for Python', 'Graphics'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# intersphinx settings\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/', None),\n 'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),\n 'numpy': ('http://docs.scipy.org/doc/numpy/', None)\n}\n", "path": "sphinx/source/conf.py"}], "after_files": [{"content": "import os\nimport re\nimport sys\n\n\ndef check_input(new_ver):\n \"\"\" Ensure that user input matches the format X.X.X \"\"\"\n\n pat = r'\\d+.\\d+.\\d+'\n if not re.match(pat, new_ver):\n print(\"The new version must be in the format X.X.X (ex. '0.6.0')\")\n return True\n\n\ndef version_update(new_ver, file_array):\n \"\"\" Replace existing version/release number in an array of files\n with a user-supplied version number (new_ver)\"\"\"\n\n pat = r\"\"\"(release|version)([\\\" ][:=] [\\\"\\'])(\\d+.\\d+.\\d+)([\\\"\\'])\"\"\"\n\n # List that will contain any files where the version number was successfully replaced\n replaced = []\n\n # Set as false until a match is found and replaced in the loop below\n early_ver = False\n\n for ver_file in file_array:\n f = open(ver_file)\n text = f.read()\n matchObj = re.search(pat, text)\n f.close()\n\n if matchObj:\n early_ver = matchObj.group(3)\n f = open(ver_file, 'w')\n text = re.sub(pat, r'\\g<1>\\g<2>%s\\g<4>' % new_ver, text)\n f.write(text)\n replaced.append(ver_file)\n else:\n print(\"Unable to find version number matching expected format 'X.X.X' in %s\" % ver_file)\n\n if early_ver:\n print(\"Version number changed from %s to %s in \\n%s\" % (early_ver, new_ver, replaced))\n\ndef version_add(new_ver, file_array):\n \"\"\"Add last version number in an array of files\n with a user-supplied last version number\"\"\"\n for ver_file in file_array:\n with open(ver_file, \"r\") as f:\n flines = f.readlines()\n for i, line in enumerate(flines):\n if \"ALL_VERSIONS\" in line:\n all_vers = flines[i]\n begin, end = all_vers.split(\"[\")\n all_vers = begin + \"['{}', \".format(new_ver) + end\n flines[i] = all_vers\n with open(ver_file, \"w\") as f:\n f.writelines(flines)\n print(\"Version number {new_ver} added in {ver_file}\".format(new_ver=new_ver, ver_file=ver_file))\n\nif __name__ == '__main__':\n if not len(sys.argv) == 3:\n print(\"Please provide the new version number and the previous one.\")\n sys.exit(1)\n\n os.chdir('../')\n\n files_to_update = ['bokehjs/src/coffee/version.coffee', 'bokehjs/package.json']\n files_to_add = ['sphinx/source/conf.py']\n updated_version = sys.argv[1]\n last_version = sys.argv[2]\n\n if check_input(updated_version):\n sys.exit(1)\n\n version_update(updated_version, files_to_update)\n version_add(updated_version, files_to_add)\n", "path": "scripts/version_update.py"}, {"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport os\n\n#\n# Bokeh documentation build configuration file, created by\n# sphinx-quickstart on Sat Oct 12 23:43:03 2013.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.graphviz',\n 'sphinx.ext.ifconfig',\n 'sphinx.ext.inheritance_diagram',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.intersphinx',\n 'bokeh.sphinxext.bokeh_autodoc',\n 'bokeh.sphinxext.bokeh_gallery',\n 'bokeh.sphinxext.bokeh_github',\n 'bokeh.sphinxext.bokeh_jinja',\n 'bokeh.sphinxext.bokeh_model',\n 'bokeh.sphinxext.bokeh_palette',\n 'bokeh.sphinxext.bokeh_plot',\n 'bokeh.sphinxext.bokeh_prop',\n 'bokeh.sphinxext.bokeh_sitemap',\n 'bokeh.sphinxext.collapsible_code_block',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Bokeh'\ncopyright = '\u00a9 Copyright 2015, Continuum Analytics.'\n\n# Get the standard computed Bokeh version string to use for |version|\n# and |release|\nfrom bokeh import __version__\n\n# The short X.Y version.\nversion = __version__\n# The full version, including alpha/beta/rc tags.\nrelease = __version__\n\n# Check for version override (e.g. when re-deploying a previously released\n# docs, or when pushing test docs that do not have a corresponding BokehJS\n# available on CDN)\nfrom bokeh.settings import settings\nif settings.docs_version():\n version = release = settings.docs_version()\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\nadd_module_names = False\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# Sort members by type\nautodoc_member_order = 'groupwise'\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'bokeh_theme'\nhtml_theme_path = ['.']\nMAIN_SITE = '//bokehplots.com'\n\nhtml_context = {\n 'SITEMAP_BASE_URL': 'http://bokeh.pydata.org/en/', # Trailing slash is needed\n 'SITENAME': 'Bokeh Docs',\n 'DESCRIPTION': 'Bokeh visualization library, documentation site.',\n 'AUTHOR': 'Bokeh contributors',\n 'VERSION': version,\n # Nav\n 'NAV': (\n ('About', MAIN_SITE + '/pages/about-bokeh.html'),\n ('Gallery', '/docs/gallery.html'),\n ('Docs', '//bokeh.pydata.org/en/latest/'),\n ('Github', '//github.com/bokeh/bokeh'),\n ),\n # Links\n 'LINKS': (\n ('FAQs', MAIN_SITE + '/pages/faqs.html'),\n ('Technical vision', MAIN_SITE + '/pages/technical-vision.html'),\n ('Roadmap', MAIN_SITE + '/pages/roadmap.html'),\n ('Citation', MAIN_SITE + '/pages/citation.html'),\n ),\n # About Links\n 'ABOUT': (\n ('About', MAIN_SITE + '/pages/about-bokeh.html'),\n ('Team', MAIN_SITE + '/pages/team.html'),\n ('Contact', MAIN_SITE + '/pages/contact.html'),\n ),\n # Social links\n 'SOCIAL': (\n ('Contribute', MAIN_SITE + '/pages/contribute.html'),\n ('Mailing list', '//groups.google.com/a/continuum.io/forum/#!forum/bokeh'),\n ('Github', '//github.com/bokeh/bokeh'),\n ('Twitter', '//twitter.com/BokehPlots'),\n ('YouTube', '//www.youtube.com/channel/UCK0rSk29mmg4UT4bIOvPYhw')\n ),\n # Links for the docs sub navigation\n 'NAV_DOCS': (\n ('Installation', 'installation'),\n ('User Guide', 'user_guide'),\n ('Gallery', 'gallery'),\n ('Reference', 'reference'),\n ('Releases', 'releases/%s' % version),\n ('Developer Guide', 'dev_guide'),\n ),\n 'ALL_VERSIONS': ['0.12.0', '0.11.0', '0.10.0', '0.9.3', '0.8.2'],\n 'css_server': os.environ.get('BOKEH_DOCS_CSS_SERVER', 'bokehplots.com'),\n}\n\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = True\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Bokehdoc'\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'Bokeh.tex', u'Bokeh Documentation', u'Continuum Analytics', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'bokeh', u'Bokeh Documentation',\n [u'Continuum Analytics'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'Bokeh', u'Bokeh Documentation', u'Continuum Analytics', 'Bokeh', 'Interactive Web Plotting for Python', 'Graphics'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# intersphinx settings\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/', None),\n 'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),\n 'numpy': ('http://docs.scipy.org/doc/numpy/', None)\n}\n", "path": "sphinx/source/conf.py"}]}
3,967
653
gh_patches_debug_6733
rasdani/github-patches
git_diff
mitmproxy__mitmproxy-5894
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- "raw" view is not raw, adds extra backslash #### Problem Description I just noticed during https://github.com/mitmproxy/mitmproxy/issues/5469#issuecomment-1191343747 #### Steps to reproduce the behavior: http.txt ``` HTTP/1.1 200 OK Content-Type: text/plain Content-Length: 1 \ ``` 1. `ncat -l 1337 < http.txt` 2. `mitmproxy` 3. `curl --proxy localhost:8080 http://127.0.0.1:1337` raw is `\\`: ![image](https://user-images.githubusercontent.com/679144/180199838-07c02668-5dab-4210-b5b6-aea78066e0fd.png) hex: ![image](https://user-images.githubusercontent.com/679144/180199860-2cdd9455-7846-4921-b67c-6b9c3669b268.png) #### System Information ``` Mitmproxy: 8.1.1 binary Python: 3.10.5 OpenSSL: OpenSSL 3.0.3 3 May 2022 Platform: Linux-5.15.0-41-generic-x86_64-with-glibc2.35 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mitmproxy/contentviews/raw.py` Content: ``` 1 from . import base 2 from mitmproxy.utils import strutils 3 4 5 class ViewRaw(base.View): 6 name = "Raw" 7 8 def __call__(self, data, **metadata): 9 return "Raw", base.format_text(strutils.bytes_to_escaped_str(data, True)) 10 11 def render_priority(self, data: bytes, **metadata) -> float: 12 return 0.1 * float(bool(data)) 13 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mitmproxy/contentviews/raw.py b/mitmproxy/contentviews/raw.py --- a/mitmproxy/contentviews/raw.py +++ b/mitmproxy/contentviews/raw.py @@ -1,12 +1,11 @@ from . import base -from mitmproxy.utils import strutils class ViewRaw(base.View): name = "Raw" def __call__(self, data, **metadata): - return "Raw", base.format_text(strutils.bytes_to_escaped_str(data, True)) + return "Raw", base.format_text(data) def render_priority(self, data: bytes, **metadata) -> float: return 0.1 * float(bool(data))
{"golden_diff": "diff --git a/mitmproxy/contentviews/raw.py b/mitmproxy/contentviews/raw.py\n--- a/mitmproxy/contentviews/raw.py\n+++ b/mitmproxy/contentviews/raw.py\n@@ -1,12 +1,11 @@\n from . import base\n-from mitmproxy.utils import strutils\n \n \n class ViewRaw(base.View):\n name = \"Raw\"\n \n def __call__(self, data, **metadata):\n- return \"Raw\", base.format_text(strutils.bytes_to_escaped_str(data, True))\n+ return \"Raw\", base.format_text(data)\n \n def render_priority(self, data: bytes, **metadata) -> float:\n return 0.1 * float(bool(data))\n", "issue": "\"raw\" view is not raw, adds extra backslash\n#### Problem Description\r\n\r\nI just noticed during https://github.com/mitmproxy/mitmproxy/issues/5469#issuecomment-1191343747\r\n\r\n#### Steps to reproduce the behavior:\r\n\r\nhttp.txt\r\n\r\n```\r\nHTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 1\r\n\r\n\\\r\n```\r\n\r\n1. `ncat -l 1337 < http.txt`\r\n2. `mitmproxy`\r\n3. `curl --proxy localhost:8080 http://127.0.0.1:1337`\r\n\r\nraw is `\\\\`:\r\n\r\n![image](https://user-images.githubusercontent.com/679144/180199838-07c02668-5dab-4210-b5b6-aea78066e0fd.png)\r\n\r\nhex:\r\n\r\n![image](https://user-images.githubusercontent.com/679144/180199860-2cdd9455-7846-4921-b67c-6b9c3669b268.png)\r\n\r\n#### System Information\r\n\r\n```\r\nMitmproxy: 8.1.1 binary\r\nPython: 3.10.5\r\nOpenSSL: OpenSSL 3.0.3 3 May 2022\r\nPlatform: Linux-5.15.0-41-generic-x86_64-with-glibc2.35\r\n```\r\n\n", "before_files": [{"content": "from . import base\nfrom mitmproxy.utils import strutils\n\n\nclass ViewRaw(base.View):\n name = \"Raw\"\n\n def __call__(self, data, **metadata):\n return \"Raw\", base.format_text(strutils.bytes_to_escaped_str(data, True))\n\n def render_priority(self, data: bytes, **metadata) -> float:\n return 0.1 * float(bool(data))\n", "path": "mitmproxy/contentviews/raw.py"}], "after_files": [{"content": "from . import base\n\n\nclass ViewRaw(base.View):\n name = \"Raw\"\n\n def __call__(self, data, **metadata):\n return \"Raw\", base.format_text(data)\n\n def render_priority(self, data: bytes, **metadata) -> float:\n return 0.1 * float(bool(data))\n", "path": "mitmproxy/contentviews/raw.py"}]}
721
151
gh_patches_debug_10976
rasdani/github-patches
git_diff
sagemath__sage-36488
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `func_persist` fails with Python 3.11 ```sage @func_persist def test(n): return n*n ``` fails with ```python AttributeError Traceback (most recent call last) Cell In[1], line 1 ----> 1 @func_persist 2 def test(n): 3 return n*n File /usr/lib/python3.11/site-packages/sage/misc/func_persist.py:60, in func_persist.__init__(self, f, dir) 56 self.__dir = dir 57 os.makedirs(dir, exist_ok=True) 58 self.__doc__ = '%s%s%s' % ( 59 f.__name__, ---> 60 inspect.formatargspec(*inspect.getargs(f.__code__)), 61 f.__doc__) AttributeError: module 'inspect' has no attribute 'formatargspec' ``` This is because `inspect.formatargspec` has been [deprecated in Python 3.5 and removed in 3.11](https://docs.python.org/3/whatsnew/3.11.html#removed). Probably related to #33842. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/sage/misc/func_persist.py` Content: ``` 1 r""" 2 Support for persistent functions in .sage files 3 4 Persistent functions are functions whose values are stored on disk 5 so they do not have to be recomputed. 6 7 The inputs to the function must be hashable (so lists are not 8 allowed). Though a hash is used, in the incredibly unlikely event 9 that a hash collision occurs, your function will not return an 10 incorrect result because of this (though the cache might not be 11 used either). 12 13 This is meant to be used from ``.sage`` files, not from 14 library ``.py`` files. 15 16 To use this disk caching mechanism, just put 17 ``@func_persist`` right before your function 18 definition. For example, 19 20 :: 21 22 @func_persist 23 def bern(n): 24 "Return the n-th Bernoulli number, caching the result to disk." 25 return bernoulli(n) 26 27 You can then use the function ``bern`` as usual, except 28 it will almost instantly return values that have already been 29 computed, even if you quit and restart. 30 31 The disk cache files are stored by default in the subdirectory 32 ``func_persist`` of the current working directory, 33 with one file for each evaluation of the function. 34 """ 35 ######################################################################## 36 # Copyright (C) 2006 William Stein <[email protected]> 37 # 38 # Distributed under the terms of the GNU General Public License (GPL) 39 # 40 # https://www.gnu.org/licenses/ 41 ######################################################################## 42 43 import inspect 44 import os 45 46 from . import persist 47 48 49 class func_persist: 50 r""" 51 Put ``@func_persist`` right before your function 52 definition to cache values it computes to disk. 53 """ 54 def __init__(self, f, dir='func_persist'): 55 self.__func = f 56 self.__dir = dir 57 os.makedirs(dir, exist_ok=True) 58 self.__doc__ = '%s%s%s' % ( 59 f.__name__, 60 inspect.formatargspec(*inspect.getargs(f.__code__)), 61 f.__doc__) 62 63 def __call__(self, *args, **kwds): 64 key = (tuple(args), tuple(kwds.items())) 65 h = hash(key) 66 name = '%s/%s_%s.sobj' % (self.__dir, self.__func.__name__, h) 67 68 if os.path.exists(name): 69 key2, val = persist.load(name) 70 if key == key2: 71 # We save and test equality of keys to avoid 72 # the (extremely remote) possibility of a hash 73 # collision. Correctness is crucial in mathematics. 74 return val 75 76 val = self.__func(*args, **kwds) 77 persist.save((key, val), name) 78 return val 79 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/sage/misc/func_persist.py b/src/sage/misc/func_persist.py --- a/src/sage/misc/func_persist.py +++ b/src/sage/misc/func_persist.py @@ -45,7 +45,6 @@ from . import persist - class func_persist: r""" Put ``@func_persist`` right before your function @@ -57,7 +56,7 @@ os.makedirs(dir, exist_ok=True) self.__doc__ = '%s%s%s' % ( f.__name__, - inspect.formatargspec(*inspect.getargs(f.__code__)), + inspect.signature(f), f.__doc__) def __call__(self, *args, **kwds):
{"golden_diff": "diff --git a/src/sage/misc/func_persist.py b/src/sage/misc/func_persist.py\n--- a/src/sage/misc/func_persist.py\n+++ b/src/sage/misc/func_persist.py\n@@ -45,7 +45,6 @@\n \n from . import persist\n \n-\n class func_persist:\n r\"\"\"\n Put ``@func_persist`` right before your function\n@@ -57,7 +56,7 @@\n os.makedirs(dir, exist_ok=True)\n self.__doc__ = '%s%s%s' % (\n f.__name__,\n- inspect.formatargspec(*inspect.getargs(f.__code__)),\n+ inspect.signature(f),\n f.__doc__)\n \n def __call__(self, *args, **kwds):\n", "issue": "`func_persist` fails with Python 3.11\n```sage\n@func_persist\ndef test(n):\n return n*n\n```\n\nfails with\n\n```python\nAttributeError Traceback (most recent call last)\nCell In[1], line 1\n----> 1 @func_persist\n 2 def test(n):\n 3 return n*n\n\nFile /usr/lib/python3.11/site-packages/sage/misc/func_persist.py:60, in func_persist.__init__(self, f, dir)\n 56 self.__dir = dir\n 57 os.makedirs(dir, exist_ok=True)\n 58 self.__doc__ = '%s%s%s' % (\n 59 f.__name__,\n---> 60 inspect.formatargspec(*inspect.getargs(f.__code__)),\n 61 f.__doc__)\n\nAttributeError: module 'inspect' has no attribute 'formatargspec'\n```\n\nThis is because `inspect.formatargspec` has been [deprecated in Python 3.5 and removed in 3.11](https://docs.python.org/3/whatsnew/3.11.html#removed). Probably related to #33842.\n\n", "before_files": [{"content": "r\"\"\"\nSupport for persistent functions in .sage files\n\nPersistent functions are functions whose values are stored on disk\nso they do not have to be recomputed.\n\nThe inputs to the function must be hashable (so lists are not\nallowed). Though a hash is used, in the incredibly unlikely event\nthat a hash collision occurs, your function will not return an\nincorrect result because of this (though the cache might not be\nused either).\n\nThis is meant to be used from ``.sage`` files, not from\nlibrary ``.py`` files.\n\nTo use this disk caching mechanism, just put\n``@func_persist`` right before your function\ndefinition. For example,\n\n::\n\n @func_persist\n def bern(n):\n \"Return the n-th Bernoulli number, caching the result to disk.\"\n return bernoulli(n)\n\nYou can then use the function ``bern`` as usual, except\nit will almost instantly return values that have already been\ncomputed, even if you quit and restart.\n\nThe disk cache files are stored by default in the subdirectory\n``func_persist`` of the current working directory,\nwith one file for each evaluation of the function.\n\"\"\"\n########################################################################\n# Copyright (C) 2006 William Stein <[email protected]>\n#\n# Distributed under the terms of the GNU General Public License (GPL)\n#\n# https://www.gnu.org/licenses/\n########################################################################\n\nimport inspect\nimport os\n\nfrom . import persist\n\n\nclass func_persist:\n r\"\"\"\n Put ``@func_persist`` right before your function\n definition to cache values it computes to disk.\n \"\"\"\n def __init__(self, f, dir='func_persist'):\n self.__func = f\n self.__dir = dir\n os.makedirs(dir, exist_ok=True)\n self.__doc__ = '%s%s%s' % (\n f.__name__,\n inspect.formatargspec(*inspect.getargs(f.__code__)),\n f.__doc__)\n\n def __call__(self, *args, **kwds):\n key = (tuple(args), tuple(kwds.items()))\n h = hash(key)\n name = '%s/%s_%s.sobj' % (self.__dir, self.__func.__name__, h)\n\n if os.path.exists(name):\n key2, val = persist.load(name)\n if key == key2:\n # We save and test equality of keys to avoid\n # the (extremely remote) possibility of a hash\n # collision. Correctness is crucial in mathematics.\n return val\n\n val = self.__func(*args, **kwds)\n persist.save((key, val), name)\n return val\n", "path": "src/sage/misc/func_persist.py"}], "after_files": [{"content": "r\"\"\"\nSupport for persistent functions in .sage files\n\nPersistent functions are functions whose values are stored on disk\nso they do not have to be recomputed.\n\nThe inputs to the function must be hashable (so lists are not\nallowed). Though a hash is used, in the incredibly unlikely event\nthat a hash collision occurs, your function will not return an\nincorrect result because of this (though the cache might not be\nused either).\n\nThis is meant to be used from ``.sage`` files, not from\nlibrary ``.py`` files.\n\nTo use this disk caching mechanism, just put\n``@func_persist`` right before your function\ndefinition. For example,\n\n::\n\n @func_persist\n def bern(n):\n \"Return the n-th Bernoulli number, caching the result to disk.\"\n return bernoulli(n)\n\nYou can then use the function ``bern`` as usual, except\nit will almost instantly return values that have already been\ncomputed, even if you quit and restart.\n\nThe disk cache files are stored by default in the subdirectory\n``func_persist`` of the current working directory,\nwith one file for each evaluation of the function.\n\"\"\"\n########################################################################\n# Copyright (C) 2006 William Stein <[email protected]>\n#\n# Distributed under the terms of the GNU General Public License (GPL)\n#\n# https://www.gnu.org/licenses/\n########################################################################\n\nimport inspect\nimport os\n\nfrom . import persist\n\nclass func_persist:\n r\"\"\"\n Put ``@func_persist`` right before your function\n definition to cache values it computes to disk.\n \"\"\"\n def __init__(self, f, dir='func_persist'):\n self.__func = f\n self.__dir = dir\n os.makedirs(dir, exist_ok=True)\n self.__doc__ = '%s%s%s' % (\n f.__name__,\n inspect.signature(f),\n f.__doc__)\n\n def __call__(self, *args, **kwds):\n key = (tuple(args), tuple(kwds.items()))\n h = hash(key)\n name = '%s/%s_%s.sobj' % (self.__dir, self.__func.__name__, h)\n\n if os.path.exists(name):\n key2, val = persist.load(name)\n if key == key2:\n # We save and test equality of keys to avoid\n # the (extremely remote) possibility of a hash\n # collision. Correctness is crucial in mathematics.\n return val\n\n val = self.__func(*args, **kwds)\n persist.save((key, val), name)\n return val\n", "path": "src/sage/misc/func_persist.py"}]}
1,262
168
gh_patches_debug_30376
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-230
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Implement filters for schema list API **Problem** <!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.--> Users should be able to: - get schemas associated with a given database key from the schema list API. - filter schemas by name. **Proposed solution** <!-- A clear and concise description of your proposed solution or feature. --> Schema list API should take name and database filter parameters. **Additional context** <!-- Add any other context or screenshots about the feature request here.--> We should use `django-filter` for this. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mathesar/views/api.py` Content: ``` 1 from rest_framework import status, viewsets 2 from rest_framework.exceptions import NotFound, ValidationError 3 from rest_framework.mixins import ListModelMixin, RetrieveModelMixin, CreateModelMixin 4 from rest_framework.response import Response 5 from django_filters import rest_framework as filters 6 7 from mathesar.database.utils import get_non_default_database_keys 8 from mathesar.models import Table, Schema, DataFile 9 from mathesar.pagination import DefaultLimitOffsetPagination, TableLimitOffsetPagination 10 from mathesar.serializers import TableSerializer, SchemaSerializer, RecordSerializer, DataFileSerializer 11 from mathesar.utils.schemas import create_schema_and_object 12 from mathesar.utils.api import create_table_from_datafile 13 from mathesar.filters import TableFilter 14 15 16 class SchemaViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin): 17 queryset = Schema.objects.all().order_by('-created_at') 18 serializer_class = SchemaSerializer 19 pagination_class = DefaultLimitOffsetPagination 20 21 def create(self, request): 22 schema = create_schema_and_object(request.data['name'], request.data['database']) 23 serializer = SchemaSerializer(schema) 24 return Response(serializer.data, status=status.HTTP_201_CREATED) 25 26 27 class TableViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin, 28 CreateModelMixin): 29 queryset = Table.objects.all().order_by('-created_at') 30 serializer_class = TableSerializer 31 pagination_class = DefaultLimitOffsetPagination 32 filter_backends = (filters.DjangoFilterBackend,) 33 filterset_class = TableFilter 34 35 def create(self, request): 36 serializer = TableSerializer(data=request.data, 37 context={'request': request}) 38 if serializer.is_valid(): 39 return create_table_from_datafile(request, serializer.data) 40 else: 41 raise ValidationError(serializer.errors) 42 43 44 class RecordViewSet(viewsets.ViewSet): 45 # There is no "update" method. 46 # We're not supporting PUT requests because there aren't a lot of use cases 47 # where the entire record needs to be replaced, PATCH suffices for updates. 48 queryset = Table.objects.all().order_by('-created_at') 49 50 def list(self, request, table_pk=None): 51 paginator = TableLimitOffsetPagination() 52 records = paginator.paginate_queryset(self.queryset, request, table_pk) 53 serializer = RecordSerializer(records, many=True) 54 return paginator.get_paginated_response(serializer.data) 55 56 def retrieve(self, request, pk=None, table_pk=None): 57 table = Table.objects.get(id=table_pk) 58 record = table.get_record(pk) 59 if not record: 60 raise NotFound 61 serializer = RecordSerializer(record) 62 return Response(serializer.data) 63 64 def create(self, request, table_pk=None): 65 table = Table.objects.get(id=table_pk) 66 # We only support adding a single record through the API. 67 assert isinstance((request.data), dict) 68 record = table.create_record_or_records(request.data) 69 serializer = RecordSerializer(record) 70 return Response(serializer.data, status=status.HTTP_201_CREATED) 71 72 def partial_update(self, request, pk=None, table_pk=None): 73 table = Table.objects.get(id=table_pk) 74 record = table.update_record(pk, request.data) 75 serializer = RecordSerializer(record) 76 return Response(serializer.data) 77 78 def destroy(self, request, pk=None, table_pk=None): 79 table = Table.objects.get(id=table_pk) 80 table.delete_record(pk) 81 return Response(status=status.HTTP_204_NO_CONTENT) 82 83 84 class DatabaseKeyViewSet(viewsets.ViewSet): 85 def list(self, request): 86 return Response(get_non_default_database_keys()) 87 88 89 class DataFileViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin, CreateModelMixin): 90 queryset = DataFile.objects.all().order_by('-created_at') 91 serializer_class = DataFileSerializer 92 pagination_class = DefaultLimitOffsetPagination 93 ``` Path: `mathesar/filters.py` Content: ``` 1 from django_filters import rest_framework as filters 2 3 from mathesar.models import Table 4 5 6 class CharInFilter(filters.BaseInFilter, filters.CharFilter): 7 pass 8 9 10 class TableFilter(filters.FilterSet): 11 name = CharInFilter(field_name='name', lookup_expr='in') 12 13 class Meta: 14 model = Table 15 fields = ['name'] 16 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mathesar/filters.py b/mathesar/filters.py --- a/mathesar/filters.py +++ b/mathesar/filters.py @@ -1,12 +1,21 @@ from django_filters import rest_framework as filters -from mathesar.models import Table +from mathesar.models import Schema, Table class CharInFilter(filters.BaseInFilter, filters.CharFilter): pass +class SchemaFilter(filters.FilterSet): + name = CharInFilter(field_name='name', lookup_expr='in') + database = CharInFilter(field_name='database', lookup_expr='in') + + class Meta: + model = Schema + fields = ['name', 'database'] + + class TableFilter(filters.FilterSet): name = CharInFilter(field_name='name', lookup_expr='in') diff --git a/mathesar/views/api.py b/mathesar/views/api.py --- a/mathesar/views/api.py +++ b/mathesar/views/api.py @@ -10,13 +10,15 @@ from mathesar.serializers import TableSerializer, SchemaSerializer, RecordSerializer, DataFileSerializer from mathesar.utils.schemas import create_schema_and_object from mathesar.utils.api import create_table_from_datafile -from mathesar.filters import TableFilter +from mathesar.filters import SchemaFilter, TableFilter class SchemaViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin): queryset = Schema.objects.all().order_by('-created_at') serializer_class = SchemaSerializer pagination_class = DefaultLimitOffsetPagination + filter_backends = (filters.DjangoFilterBackend,) + filterset_class = SchemaFilter def create(self, request): schema = create_schema_and_object(request.data['name'], request.data['database'])
{"golden_diff": "diff --git a/mathesar/filters.py b/mathesar/filters.py\n--- a/mathesar/filters.py\n+++ b/mathesar/filters.py\n@@ -1,12 +1,21 @@\n from django_filters import rest_framework as filters\n \n-from mathesar.models import Table\n+from mathesar.models import Schema, Table\n \n \n class CharInFilter(filters.BaseInFilter, filters.CharFilter):\n pass\n \n \n+class SchemaFilter(filters.FilterSet):\n+ name = CharInFilter(field_name='name', lookup_expr='in')\n+ database = CharInFilter(field_name='database', lookup_expr='in')\n+\n+ class Meta:\n+ model = Schema\n+ fields = ['name', 'database']\n+\n+\n class TableFilter(filters.FilterSet):\n name = CharInFilter(field_name='name', lookup_expr='in')\n \ndiff --git a/mathesar/views/api.py b/mathesar/views/api.py\n--- a/mathesar/views/api.py\n+++ b/mathesar/views/api.py\n@@ -10,13 +10,15 @@\n from mathesar.serializers import TableSerializer, SchemaSerializer, RecordSerializer, DataFileSerializer\n from mathesar.utils.schemas import create_schema_and_object\n from mathesar.utils.api import create_table_from_datafile\n-from mathesar.filters import TableFilter\n+from mathesar.filters import SchemaFilter, TableFilter\n \n \n class SchemaViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):\n queryset = Schema.objects.all().order_by('-created_at')\n serializer_class = SchemaSerializer\n pagination_class = DefaultLimitOffsetPagination\n+ filter_backends = (filters.DjangoFilterBackend,)\n+ filterset_class = SchemaFilter\n \n def create(self, request):\n schema = create_schema_and_object(request.data['name'], request.data['database'])\n", "issue": "Implement filters for schema list API\n**Problem**\r\n<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->\r\nUsers should be able to:\r\n- get schemas associated with a given database key from the schema list API.\r\n- filter schemas by name.\r\n\r\n**Proposed solution**\r\n<!-- A clear and concise description of your proposed solution or feature. -->\r\nSchema list API should take name and database filter parameters.\r\n\r\n**Additional context**\r\n<!-- Add any other context or screenshots about the feature request here.-->\r\nWe should use `django-filter` for this.\n", "before_files": [{"content": "from rest_framework import status, viewsets\nfrom rest_framework.exceptions import NotFound, ValidationError\nfrom rest_framework.mixins import ListModelMixin, RetrieveModelMixin, CreateModelMixin\nfrom rest_framework.response import Response\nfrom django_filters import rest_framework as filters\n\nfrom mathesar.database.utils import get_non_default_database_keys\nfrom mathesar.models import Table, Schema, DataFile\nfrom mathesar.pagination import DefaultLimitOffsetPagination, TableLimitOffsetPagination\nfrom mathesar.serializers import TableSerializer, SchemaSerializer, RecordSerializer, DataFileSerializer\nfrom mathesar.utils.schemas import create_schema_and_object\nfrom mathesar.utils.api import create_table_from_datafile\nfrom mathesar.filters import TableFilter\n\n\nclass SchemaViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):\n queryset = Schema.objects.all().order_by('-created_at')\n serializer_class = SchemaSerializer\n pagination_class = DefaultLimitOffsetPagination\n\n def create(self, request):\n schema = create_schema_and_object(request.data['name'], request.data['database'])\n serializer = SchemaSerializer(schema)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n\nclass TableViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin,\n CreateModelMixin):\n queryset = Table.objects.all().order_by('-created_at')\n serializer_class = TableSerializer\n pagination_class = DefaultLimitOffsetPagination\n filter_backends = (filters.DjangoFilterBackend,)\n filterset_class = TableFilter\n\n def create(self, request):\n serializer = TableSerializer(data=request.data,\n context={'request': request})\n if serializer.is_valid():\n return create_table_from_datafile(request, serializer.data)\n else:\n raise ValidationError(serializer.errors)\n\n\nclass RecordViewSet(viewsets.ViewSet):\n # There is no \"update\" method.\n # We're not supporting PUT requests because there aren't a lot of use cases\n # where the entire record needs to be replaced, PATCH suffices for updates.\n queryset = Table.objects.all().order_by('-created_at')\n\n def list(self, request, table_pk=None):\n paginator = TableLimitOffsetPagination()\n records = paginator.paginate_queryset(self.queryset, request, table_pk)\n serializer = RecordSerializer(records, many=True)\n return paginator.get_paginated_response(serializer.data)\n\n def retrieve(self, request, pk=None, table_pk=None):\n table = Table.objects.get(id=table_pk)\n record = table.get_record(pk)\n if not record:\n raise NotFound\n serializer = RecordSerializer(record)\n return Response(serializer.data)\n\n def create(self, request, table_pk=None):\n table = Table.objects.get(id=table_pk)\n # We only support adding a single record through the API.\n assert isinstance((request.data), dict)\n record = table.create_record_or_records(request.data)\n serializer = RecordSerializer(record)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n def partial_update(self, request, pk=None, table_pk=None):\n table = Table.objects.get(id=table_pk)\n record = table.update_record(pk, request.data)\n serializer = RecordSerializer(record)\n return Response(serializer.data)\n\n def destroy(self, request, pk=None, table_pk=None):\n table = Table.objects.get(id=table_pk)\n table.delete_record(pk)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass DatabaseKeyViewSet(viewsets.ViewSet):\n def list(self, request):\n return Response(get_non_default_database_keys())\n\n\nclass DataFileViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin, CreateModelMixin):\n queryset = DataFile.objects.all().order_by('-created_at')\n serializer_class = DataFileSerializer\n pagination_class = DefaultLimitOffsetPagination\n", "path": "mathesar/views/api.py"}, {"content": "from django_filters import rest_framework as filters\n\nfrom mathesar.models import Table\n\n\nclass CharInFilter(filters.BaseInFilter, filters.CharFilter):\n pass\n\n\nclass TableFilter(filters.FilterSet):\n name = CharInFilter(field_name='name', lookup_expr='in')\n\n class Meta:\n model = Table\n fields = ['name']\n", "path": "mathesar/filters.py"}], "after_files": [{"content": "from rest_framework import status, viewsets\nfrom rest_framework.exceptions import NotFound, ValidationError\nfrom rest_framework.mixins import ListModelMixin, RetrieveModelMixin, CreateModelMixin\nfrom rest_framework.response import Response\nfrom django_filters import rest_framework as filters\n\nfrom mathesar.database.utils import get_non_default_database_keys\nfrom mathesar.models import Table, Schema, DataFile\nfrom mathesar.pagination import DefaultLimitOffsetPagination, TableLimitOffsetPagination\nfrom mathesar.serializers import TableSerializer, SchemaSerializer, RecordSerializer, DataFileSerializer\nfrom mathesar.utils.schemas import create_schema_and_object\nfrom mathesar.utils.api import create_table_from_datafile\nfrom mathesar.filters import SchemaFilter, TableFilter\n\n\nclass SchemaViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):\n queryset = Schema.objects.all().order_by('-created_at')\n serializer_class = SchemaSerializer\n pagination_class = DefaultLimitOffsetPagination\n filter_backends = (filters.DjangoFilterBackend,)\n filterset_class = SchemaFilter\n\n def create(self, request):\n schema = create_schema_and_object(request.data['name'], request.data['database'])\n serializer = SchemaSerializer(schema)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n\nclass TableViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin,\n CreateModelMixin):\n queryset = Table.objects.all().order_by('-created_at')\n serializer_class = TableSerializer\n pagination_class = DefaultLimitOffsetPagination\n filter_backends = (filters.DjangoFilterBackend,)\n filterset_class = TableFilter\n\n def create(self, request):\n serializer = TableSerializer(data=request.data,\n context={'request': request})\n if serializer.is_valid():\n return create_table_from_datafile(request, serializer.data)\n else:\n raise ValidationError(serializer.errors)\n\n\nclass RecordViewSet(viewsets.ViewSet):\n # There is no \"update\" method.\n # We're not supporting PUT requests because there aren't a lot of use cases\n # where the entire record needs to be replaced, PATCH suffices for updates.\n queryset = Table.objects.all().order_by('-created_at')\n\n def list(self, request, table_pk=None):\n paginator = TableLimitOffsetPagination()\n records = paginator.paginate_queryset(self.queryset, request, table_pk)\n serializer = RecordSerializer(records, many=True)\n return paginator.get_paginated_response(serializer.data)\n\n def retrieve(self, request, pk=None, table_pk=None):\n table = Table.objects.get(id=table_pk)\n record = table.get_record(pk)\n if not record:\n raise NotFound\n serializer = RecordSerializer(record)\n return Response(serializer.data)\n\n def create(self, request, table_pk=None):\n table = Table.objects.get(id=table_pk)\n # We only support adding a single record through the API.\n assert isinstance((request.data), dict)\n record = table.create_record_or_records(request.data)\n serializer = RecordSerializer(record)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n def partial_update(self, request, pk=None, table_pk=None):\n table = Table.objects.get(id=table_pk)\n record = table.update_record(pk, request.data)\n serializer = RecordSerializer(record)\n return Response(serializer.data)\n\n def destroy(self, request, pk=None, table_pk=None):\n table = Table.objects.get(id=table_pk)\n table.delete_record(pk)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass DatabaseKeyViewSet(viewsets.ViewSet):\n def list(self, request):\n return Response(get_non_default_database_keys())\n\n\nclass DataFileViewSet(viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin, CreateModelMixin):\n queryset = DataFile.objects.all().order_by('-created_at')\n serializer_class = DataFileSerializer\n pagination_class = DefaultLimitOffsetPagination\n", "path": "mathesar/views/api.py"}, {"content": "from django_filters import rest_framework as filters\n\nfrom mathesar.models import Schema, Table\n\n\nclass CharInFilter(filters.BaseInFilter, filters.CharFilter):\n pass\n\n\nclass SchemaFilter(filters.FilterSet):\n name = CharInFilter(field_name='name', lookup_expr='in')\n database = CharInFilter(field_name='database', lookup_expr='in')\n\n class Meta:\n model = Schema\n fields = ['name', 'database']\n\n\nclass TableFilter(filters.FilterSet):\n name = CharInFilter(field_name='name', lookup_expr='in')\n\n class Meta:\n model = Table\n fields = ['name']\n", "path": "mathesar/filters.py"}]}
1,462
379
gh_patches_debug_60834
rasdani/github-patches
git_diff
pytorch__text-75
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- max_size vocab is not consistent. **Context:** Num field includes the numbers 0 - 9. I set `max_size=10`. Then I print the vocab that was built: ``` num_field.build_vocab(train, max_size=10) print(num_field.vocab.itos) # ['<unk>', '<pad>', '<s>', '</s>', u'1', u'2'] print(len(num_field.vocab.itos)) # 6 ``` Then I checked the `words` created from tokenization: ``` print(words) # [(u'1', 11308), (u'2', 11270), (u'9', 11058), (u'0', 11020), (u'5', 10952), (u'4', 10942), (u'6', 10914), (u'8', 10820), (u'3', 10766), (u'7', 10706), ('</s>', 0), ('<pad>', 0), ('<s>', 0), ('<unk>', 0)] ``` Looks like the vocab built includes only 6 tokens yet the max_size is 10 while there are 14 possible tokens. **Problem:** If the number of tokens is larger than `max_size`, `build_vocab` does not fill up the vocabulary up till `max_size`. **Possible Solution:** Update https://github.com/pytorch/text/blob/master/torchtext/vocab.py#L129 to not subtract `len(self.itos)` from `max_size`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchtext/vocab.py` Content: ``` 1 from __future__ import print_function 2 import array 3 from collections import defaultdict 4 import os 5 import zipfile 6 7 import six 8 from six.moves.urllib.request import urlretrieve 9 import torch 10 from tqdm import trange, tqdm 11 12 from .utils import reporthook 13 14 URL = { 15 'glove.42B': 'http://nlp.stanford.edu/data/glove.42B.300d.zip', 16 'glove.840B': 'http://nlp.stanford.edu/data/glove.840B.300d.zip', 17 'glove.twitter.27B': 'http://nlp.stanford.edu/data/glove.twitter.27B.zip', 18 'glove.6B': 'http://nlp.stanford.edu/data/glove.6B.zip' 19 } 20 21 22 def load_word_vectors(root, wv_type, dim): 23 """Load word vectors from a path, trying .pt, .txt, and .zip extensions.""" 24 if isinstance(dim, int): 25 dim = str(dim) + 'd' 26 fname = os.path.join(root, wv_type + '.' + dim) 27 if os.path.isfile(fname + '.pt'): 28 fname_pt = fname + '.pt' 29 print('loading word vectors from', fname_pt) 30 return torch.load(fname_pt) 31 if os.path.isfile(fname + '.txt'): 32 fname_txt = fname + '.txt' 33 cm = open(fname_txt, 'rb') 34 cm = [line for line in cm] 35 elif os.path.basename(wv_type) in URL: 36 url = URL[wv_type] 37 print('downloading word vectors from {}'.format(url)) 38 filename = os.path.basename(fname) 39 if not os.path.exists(root): 40 os.makedirs(root) 41 with tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t: 42 fname, _ = urlretrieve(url, fname, reporthook=reporthook(t)) 43 with zipfile.ZipFile(fname, "r") as zf: 44 print('extracting word vectors into {}'.format(root)) 45 zf.extractall(root) 46 if not os.path.isfile(fname + '.txt'): 47 raise RuntimeError('no word vectors of requested dimension found') 48 return load_word_vectors(root, wv_type, dim) 49 else: 50 raise RuntimeError('unable to load word vectors') 51 52 wv_tokens, wv_arr, wv_size = [], array.array('d'), None 53 if cm is not None: 54 print("Loading word vectors from {}".format(fname_txt)) 55 for line in trange(len(cm)): 56 entries = cm[line].strip().split(b' ') 57 word, entries = entries[0], entries[1:] 58 if wv_size is None: 59 wv_size = len(entries) 60 try: 61 if isinstance(word, six.binary_type): 62 word = word.decode('utf-8') 63 except: 64 print('non-UTF8 token', repr(word), 'ignored') 65 continue 66 wv_arr.extend(float(x) for x in entries) 67 wv_tokens.append(word) 68 69 wv_dict = {word: i for i, word in enumerate(wv_tokens)} 70 wv_arr = torch.Tensor(wv_arr).view(-1, wv_size) 71 ret = (wv_dict, wv_arr, wv_size) 72 torch.save(ret, fname + '.pt') 73 return ret 74 75 76 class Vocab(object): 77 """Defines a vocabulary object that will be used to numericalize a field. 78 79 Attributes: 80 freqs: A collections.Counter object holding the frequencies of tokens 81 in the data used to build the Vocab. 82 stoi: A collections.defaultdict instance mapping token strings to 83 numerical identifiers. 84 itos: A list of token strings indexed by their numerical identifiers. 85 vectors: A Tensor containing word vectors for the tokens in the Vocab, 86 if a word vector file has been provided. 87 """ 88 89 def __init__(self, counter, max_size=None, min_freq=1, wv_dir=os.getcwd(), 90 wv_type=None, wv_dim=300, unk_init='random', 91 specials=['<pad>'], fill_from_vectors=False): 92 """Create a Vocab object from a collections.Counter. 93 94 Arguments: 95 counter: collections.Counter object holding the frequencies of 96 each value found in the data. 97 max_size: The maximum size of the vocabulary, or None for no 98 maximum. Default: None. 99 min_freq: The minimum frequency needed to include a token in the 100 vocabulary. Default: 1. 101 wv_dir: directory containing word vector file and destination for 102 downloaded word vector files 103 wv_type: type of word vectors; None for no word vectors 104 wv_dim: dimension of word vectors 105 specials: The list of special tokens (e.g., padding or eos) that 106 will be prepended to the vocabulary in addition to an <unk> 107 token. 108 fill_from_vectors: Whether to add to the vocabulary every token 109 for which a word vector specified by vectors is present 110 even if the token does not appear in the provided data. 111 unk_init: default to random initialization for word vectors not in the 112 pretrained word vector file; otherwise set to zero 113 """ 114 self.freqs = counter.copy() 115 self.unk_init = unk_init 116 counter.update(['<unk>'] + specials) 117 118 if wv_type is not None: 119 wv_dict, wv_arr, self.wv_size = load_word_vectors(wv_dir, wv_type, wv_dim) 120 121 if fill_from_vectors: 122 counter.update(wv_dict.keys()) 123 124 self.stoi = defaultdict(lambda: 0) 125 self.stoi.update({tok: i + 1 for i, tok in enumerate(specials)}) 126 self.itos = ['<unk>'] + specials 127 128 counter.subtract({tok: counter[tok] for tok in ['<unk>'] + specials}) 129 max_size = None if max_size is None else max_size - len(self.itos) 130 131 # sort by frequency, then alphabetically 132 words = sorted(counter.items(), key=lambda tup: tup[0]) 133 words.sort(key=lambda tup: tup[1], reverse=True) 134 135 for k, v in words: 136 if v < min_freq or len(self.itos) == max_size: 137 break 138 self.itos.append(k) 139 self.stoi[k] = len(self.itos) - 1 140 141 if wv_type is not None: 142 self.set_vectors(wv_dict, wv_arr) 143 144 def __len__(self): 145 return len(self.itos) 146 147 def load_vectors(self, wv_dir=os.getcwd(), wv_type=None, wv_dim=300, 148 unk_init='random'): 149 """Loads word vectors into the vocab 150 151 Arguments: 152 wv_dir: directory containing word vector file and destination for 153 downloaded word vector files 154 wv_type: type of word vectors; None for no word vectors 155 wv_dim: dimension of word vectors 156 157 unk_init: default to random initialization for unknown word vectors; 158 otherwise set to zero 159 """ 160 self.unk_init = unk_init 161 wv_dict, wv_arr, self.wv_size = load_word_vectors(wv_dir, wv_type, wv_dim) 162 self.set_vectors(wv_dict, wv_arr) 163 164 def set_vectors(self, wv_dict, wv_arr): 165 self.vectors = torch.Tensor(len(self), self.wv_size) 166 self.vectors.normal_(0, 1) if self.unk_init == 'random' else self.vectors.zero_() 167 for i, token in enumerate(self.itos): 168 wv_index = wv_dict.get(token, None) 169 if wv_index is not None: 170 self.vectors[i] = wv_arr[wv_index] 171 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchtext/vocab.py b/torchtext/vocab.py --- a/torchtext/vocab.py +++ b/torchtext/vocab.py @@ -126,7 +126,7 @@ self.itos = ['<unk>'] + specials counter.subtract({tok: counter[tok] for tok in ['<unk>'] + specials}) - max_size = None if max_size is None else max_size - len(self.itos) + max_size = None if max_size is None else max_size + len(self.itos) # sort by frequency, then alphabetically words = sorted(counter.items(), key=lambda tup: tup[0])
{"golden_diff": "diff --git a/torchtext/vocab.py b/torchtext/vocab.py\n--- a/torchtext/vocab.py\n+++ b/torchtext/vocab.py\n@@ -126,7 +126,7 @@\n self.itos = ['<unk>'] + specials\n \n counter.subtract({tok: counter[tok] for tok in ['<unk>'] + specials})\n- max_size = None if max_size is None else max_size - len(self.itos)\n+ max_size = None if max_size is None else max_size + len(self.itos)\n \n # sort by frequency, then alphabetically\n words = sorted(counter.items(), key=lambda tup: tup[0])\n", "issue": "max_size vocab is not consistent. \n**Context:**\r\nNum field includes the numbers 0 - 9. I set `max_size=10`. Then I print the vocab that was built:\r\n\r\n```\r\n num_field.build_vocab(train, max_size=10)\r\n print(num_field.vocab.itos)\r\n # ['<unk>', '<pad>', '<s>', '</s>', u'1', u'2']\r\n print(len(num_field.vocab.itos))\r\n # 6\r\n```\r\n\r\nThen I checked the `words` created from tokenization:\r\n```\r\nprint(words)\r\n# [(u'1', 11308), (u'2', 11270), (u'9', 11058), (u'0', 11020), (u'5', 10952), (u'4', 10942), (u'6', 10914), (u'8', 10820), (u'3', 10766), (u'7', 10706), ('</s>', 0), ('<pad>', 0), ('<s>', 0), ('<unk>', 0)]\r\n```\r\n\r\nLooks like the vocab built includes only 6 tokens yet the max_size is 10 while there are 14 possible tokens.\r\n\r\n**Problem:**\r\nIf the number of tokens is larger than `max_size`, `build_vocab` does not fill up the vocabulary up till `max_size`.\r\n\r\n**Possible Solution:**\r\nUpdate https://github.com/pytorch/text/blob/master/torchtext/vocab.py#L129 to not subtract `len(self.itos)` from `max_size`.\r\n\n", "before_files": [{"content": "from __future__ import print_function\nimport array\nfrom collections import defaultdict\nimport os\nimport zipfile\n\nimport six\nfrom six.moves.urllib.request import urlretrieve\nimport torch\nfrom tqdm import trange, tqdm\n\nfrom .utils import reporthook\n\nURL = {\n 'glove.42B': 'http://nlp.stanford.edu/data/glove.42B.300d.zip',\n 'glove.840B': 'http://nlp.stanford.edu/data/glove.840B.300d.zip',\n 'glove.twitter.27B': 'http://nlp.stanford.edu/data/glove.twitter.27B.zip',\n 'glove.6B': 'http://nlp.stanford.edu/data/glove.6B.zip'\n}\n\n\ndef load_word_vectors(root, wv_type, dim):\n \"\"\"Load word vectors from a path, trying .pt, .txt, and .zip extensions.\"\"\"\n if isinstance(dim, int):\n dim = str(dim) + 'd'\n fname = os.path.join(root, wv_type + '.' + dim)\n if os.path.isfile(fname + '.pt'):\n fname_pt = fname + '.pt'\n print('loading word vectors from', fname_pt)\n return torch.load(fname_pt)\n if os.path.isfile(fname + '.txt'):\n fname_txt = fname + '.txt'\n cm = open(fname_txt, 'rb')\n cm = [line for line in cm]\n elif os.path.basename(wv_type) in URL:\n url = URL[wv_type]\n print('downloading word vectors from {}'.format(url))\n filename = os.path.basename(fname)\n if not os.path.exists(root):\n os.makedirs(root)\n with tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t:\n fname, _ = urlretrieve(url, fname, reporthook=reporthook(t))\n with zipfile.ZipFile(fname, \"r\") as zf:\n print('extracting word vectors into {}'.format(root))\n zf.extractall(root)\n if not os.path.isfile(fname + '.txt'):\n raise RuntimeError('no word vectors of requested dimension found')\n return load_word_vectors(root, wv_type, dim)\n else:\n raise RuntimeError('unable to load word vectors')\n\n wv_tokens, wv_arr, wv_size = [], array.array('d'), None\n if cm is not None:\n print(\"Loading word vectors from {}\".format(fname_txt))\n for line in trange(len(cm)):\n entries = cm[line].strip().split(b' ')\n word, entries = entries[0], entries[1:]\n if wv_size is None:\n wv_size = len(entries)\n try:\n if isinstance(word, six.binary_type):\n word = word.decode('utf-8')\n except:\n print('non-UTF8 token', repr(word), 'ignored')\n continue\n wv_arr.extend(float(x) for x in entries)\n wv_tokens.append(word)\n\n wv_dict = {word: i for i, word in enumerate(wv_tokens)}\n wv_arr = torch.Tensor(wv_arr).view(-1, wv_size)\n ret = (wv_dict, wv_arr, wv_size)\n torch.save(ret, fname + '.pt')\n return ret\n\n\nclass Vocab(object):\n \"\"\"Defines a vocabulary object that will be used to numericalize a field.\n\n Attributes:\n freqs: A collections.Counter object holding the frequencies of tokens\n in the data used to build the Vocab.\n stoi: A collections.defaultdict instance mapping token strings to\n numerical identifiers.\n itos: A list of token strings indexed by their numerical identifiers.\n vectors: A Tensor containing word vectors for the tokens in the Vocab,\n if a word vector file has been provided.\n \"\"\"\n\n def __init__(self, counter, max_size=None, min_freq=1, wv_dir=os.getcwd(),\n wv_type=None, wv_dim=300, unk_init='random',\n specials=['<pad>'], fill_from_vectors=False):\n \"\"\"Create a Vocab object from a collections.Counter.\n\n Arguments:\n counter: collections.Counter object holding the frequencies of\n each value found in the data.\n max_size: The maximum size of the vocabulary, or None for no\n maximum. Default: None.\n min_freq: The minimum frequency needed to include a token in the\n vocabulary. Default: 1.\n wv_dir: directory containing word vector file and destination for\n downloaded word vector files\n wv_type: type of word vectors; None for no word vectors\n wv_dim: dimension of word vectors\n specials: The list of special tokens (e.g., padding or eos) that\n will be prepended to the vocabulary in addition to an <unk>\n token.\n fill_from_vectors: Whether to add to the vocabulary every token\n for which a word vector specified by vectors is present\n even if the token does not appear in the provided data.\n unk_init: default to random initialization for word vectors not in the\n pretrained word vector file; otherwise set to zero\n \"\"\"\n self.freqs = counter.copy()\n self.unk_init = unk_init\n counter.update(['<unk>'] + specials)\n\n if wv_type is not None:\n wv_dict, wv_arr, self.wv_size = load_word_vectors(wv_dir, wv_type, wv_dim)\n\n if fill_from_vectors:\n counter.update(wv_dict.keys())\n\n self.stoi = defaultdict(lambda: 0)\n self.stoi.update({tok: i + 1 for i, tok in enumerate(specials)})\n self.itos = ['<unk>'] + specials\n\n counter.subtract({tok: counter[tok] for tok in ['<unk>'] + specials})\n max_size = None if max_size is None else max_size - len(self.itos)\n\n # sort by frequency, then alphabetically\n words = sorted(counter.items(), key=lambda tup: tup[0])\n words.sort(key=lambda tup: tup[1], reverse=True)\n\n for k, v in words:\n if v < min_freq or len(self.itos) == max_size:\n break\n self.itos.append(k)\n self.stoi[k] = len(self.itos) - 1\n\n if wv_type is not None:\n self.set_vectors(wv_dict, wv_arr)\n\n def __len__(self):\n return len(self.itos)\n\n def load_vectors(self, wv_dir=os.getcwd(), wv_type=None, wv_dim=300,\n unk_init='random'):\n \"\"\"Loads word vectors into the vocab\n\n Arguments:\n wv_dir: directory containing word vector file and destination for\n downloaded word vector files\n wv_type: type of word vectors; None for no word vectors\n wv_dim: dimension of word vectors\n\n unk_init: default to random initialization for unknown word vectors;\n otherwise set to zero\n \"\"\"\n self.unk_init = unk_init\n wv_dict, wv_arr, self.wv_size = load_word_vectors(wv_dir, wv_type, wv_dim)\n self.set_vectors(wv_dict, wv_arr)\n\n def set_vectors(self, wv_dict, wv_arr):\n self.vectors = torch.Tensor(len(self), self.wv_size)\n self.vectors.normal_(0, 1) if self.unk_init == 'random' else self.vectors.zero_()\n for i, token in enumerate(self.itos):\n wv_index = wv_dict.get(token, None)\n if wv_index is not None:\n self.vectors[i] = wv_arr[wv_index]\n", "path": "torchtext/vocab.py"}], "after_files": [{"content": "from __future__ import print_function\nimport array\nfrom collections import defaultdict\nimport os\nimport zipfile\n\nimport six\nfrom six.moves.urllib.request import urlretrieve\nimport torch\nfrom tqdm import trange, tqdm\n\nfrom .utils import reporthook\n\nURL = {\n 'glove.42B': 'http://nlp.stanford.edu/data/glove.42B.300d.zip',\n 'glove.840B': 'http://nlp.stanford.edu/data/glove.840B.300d.zip',\n 'glove.twitter.27B': 'http://nlp.stanford.edu/data/glove.twitter.27B.zip',\n 'glove.6B': 'http://nlp.stanford.edu/data/glove.6B.zip'\n}\n\n\ndef load_word_vectors(root, wv_type, dim):\n \"\"\"Load word vectors from a path, trying .pt, .txt, and .zip extensions.\"\"\"\n if isinstance(dim, int):\n dim = str(dim) + 'd'\n fname = os.path.join(root, wv_type + '.' + dim)\n if os.path.isfile(fname + '.pt'):\n fname_pt = fname + '.pt'\n print('loading word vectors from', fname_pt)\n return torch.load(fname_pt)\n if os.path.isfile(fname + '.txt'):\n fname_txt = fname + '.txt'\n cm = open(fname_txt, 'rb')\n cm = [line for line in cm]\n elif os.path.basename(wv_type) in URL:\n url = URL[wv_type]\n print('downloading word vectors from {}'.format(url))\n filename = os.path.basename(fname)\n if not os.path.exists(root):\n os.makedirs(root)\n with tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t:\n fname, _ = urlretrieve(url, fname, reporthook=reporthook(t))\n with zipfile.ZipFile(fname, \"r\") as zf:\n print('extracting word vectors into {}'.format(root))\n zf.extractall(root)\n if not os.path.isfile(fname + '.txt'):\n raise RuntimeError('no word vectors of requested dimension found')\n return load_word_vectors(root, wv_type, dim)\n else:\n raise RuntimeError('unable to load word vectors')\n\n wv_tokens, wv_arr, wv_size = [], array.array('d'), None\n if cm is not None:\n print(\"Loading word vectors from {}\".format(fname_txt))\n for line in trange(len(cm)):\n entries = cm[line].strip().split(b' ')\n word, entries = entries[0], entries[1:]\n if wv_size is None:\n wv_size = len(entries)\n try:\n if isinstance(word, six.binary_type):\n word = word.decode('utf-8')\n except:\n print('non-UTF8 token', repr(word), 'ignored')\n continue\n wv_arr.extend(float(x) for x in entries)\n wv_tokens.append(word)\n\n wv_dict = {word: i for i, word in enumerate(wv_tokens)}\n wv_arr = torch.Tensor(wv_arr).view(-1, wv_size)\n ret = (wv_dict, wv_arr, wv_size)\n torch.save(ret, fname + '.pt')\n return ret\n\n\nclass Vocab(object):\n \"\"\"Defines a vocabulary object that will be used to numericalize a field.\n\n Attributes:\n freqs: A collections.Counter object holding the frequencies of tokens\n in the data used to build the Vocab.\n stoi: A collections.defaultdict instance mapping token strings to\n numerical identifiers.\n itos: A list of token strings indexed by their numerical identifiers.\n vectors: A Tensor containing word vectors for the tokens in the Vocab,\n if a word vector file has been provided.\n \"\"\"\n\n def __init__(self, counter, max_size=None, min_freq=1, wv_dir=os.getcwd(),\n wv_type=None, wv_dim=300, unk_init='random',\n specials=['<pad>'], fill_from_vectors=False):\n \"\"\"Create a Vocab object from a collections.Counter.\n\n Arguments:\n counter: collections.Counter object holding the frequencies of\n each value found in the data.\n max_size: The maximum size of the vocabulary, or None for no\n maximum. Default: None.\n min_freq: The minimum frequency needed to include a token in the\n vocabulary. Default: 1.\n wv_dir: directory containing word vector file and destination for\n downloaded word vector files\n wv_type: type of word vectors; None for no word vectors\n wv_dim: dimension of word vectors\n specials: The list of special tokens (e.g., padding or eos) that\n will be prepended to the vocabulary in addition to an <unk>\n token.\n fill_from_vectors: Whether to add to the vocabulary every token\n for which a word vector specified by vectors is present\n even if the token does not appear in the provided data.\n unk_init: default to random initialization for word vectors not in the\n pretrained word vector file; otherwise set to zero\n \"\"\"\n self.freqs = counter.copy()\n self.unk_init = unk_init\n counter.update(['<unk>'] + specials)\n\n if wv_type is not None:\n wv_dict, wv_arr, self.wv_size = load_word_vectors(wv_dir, wv_type, wv_dim)\n\n if fill_from_vectors:\n counter.update(wv_dict.keys())\n\n self.stoi = defaultdict(lambda: 0)\n self.stoi.update({tok: i + 1 for i, tok in enumerate(specials)})\n self.itos = ['<unk>'] + specials\n\n counter.subtract({tok: counter[tok] for tok in ['<unk>'] + specials})\n max_size = None if max_size is None else max_size + len(self.itos)\n\n # sort by frequency, then alphabetically\n words = sorted(counter.items(), key=lambda tup: tup[0])\n words.sort(key=lambda tup: tup[1], reverse=True)\n\n for k, v in words:\n if v < min_freq or len(self.itos) == max_size:\n break\n self.itos.append(k)\n self.stoi[k] = len(self.itos) - 1\n\n if wv_type is not None:\n self.set_vectors(wv_dict, wv_arr)\n\n def __len__(self):\n return len(self.itos)\n\n def load_vectors(self, wv_dir=os.getcwd(), wv_type=None, wv_dim=300,\n unk_init='random'):\n \"\"\"Loads word vectors into the vocab\n\n Arguments:\n wv_dir: directory containing word vector file and destination for\n downloaded word vector files\n wv_type: type of word vectors; None for no word vectors\n wv_dim: dimension of word vectors\n\n unk_init: default to random initialization for unknown word vectors;\n otherwise set to zero\n \"\"\"\n self.unk_init = unk_init\n wv_dict, wv_arr, self.wv_size = load_word_vectors(wv_dir, wv_type, wv_dim)\n self.set_vectors(wv_dict, wv_arr)\n\n def set_vectors(self, wv_dict, wv_arr):\n self.vectors = torch.Tensor(len(self), self.wv_size)\n self.vectors.normal_(0, 1) if self.unk_init == 'random' else self.vectors.zero_()\n for i, token in enumerate(self.itos):\n wv_index = wv_dict.get(token, None)\n if wv_index is not None:\n self.vectors[i] = wv_arr[wv_index]\n", "path": "torchtext/vocab.py"}]}
2,720
150
gh_patches_debug_39714
rasdani/github-patches
git_diff
iterative__dvc-1781
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- add `pipeline show --tree` Unfold dependencies into a tree same way it is done in npm: ``` │ │ │ └── [email protected] │ │ ├── [email protected] deduped │ │ ├── [email protected] deduped │ │ ├─┬ [email protected] │ │ │ ├─┬ [email protected] │ │ │ │ └── [email protected] │ │ │ ├── [email protected] │ │ │ └── [email protected] deduped │ │ ├── [email protected] deduped │ │ ├─┬ [email protected] │ │ │ ├─┬ [email protected] │ │ │ │ └─┬ [email protected] │ │ │ │ └── [email protected] │ │ │ └─┬ [email protected] │ │ │ ├── [email protected] │ │ │ └── [email protected] │ │ └── [email protected] deduped │ ├─┬ [email protected] ``` Kudos @shcheklein --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dvc/command/pipeline.py` Content: ``` 1 from __future__ import unicode_literals 2 3 from dvc.utils.compat import str 4 5 import os 6 7 import dvc.logger as logger 8 from dvc.exceptions import DvcException 9 from dvc.command.base import CmdBase, fix_subparsers 10 11 12 class CmdPipelineShow(CmdBase): 13 def _show(self, target, commands, outs): 14 import networkx 15 from dvc.stage import Stage 16 17 stage = Stage.load(self.repo, target) 18 G = self.repo.graph()[0] 19 stages = networkx.get_node_attributes(G, "stage") 20 node = os.path.relpath(stage.path, self.repo.root_dir) 21 22 for n in networkx.dfs_postorder_nodes(G, node): 23 if commands: 24 logger.info(stages[n].cmd) 25 elif outs: 26 for out in stages[n].outs: 27 logger.info(str(out)) 28 else: 29 logger.info(n) 30 31 def __build_graph(self, target, commands, outs): 32 import networkx 33 from dvc.stage import Stage 34 35 stage = Stage.load(self.repo, target) 36 node = os.path.relpath(stage.path, self.repo.root_dir) 37 38 pipelines = list( 39 filter(lambda g: node in g.nodes(), self.repo.pipelines()) 40 ) 41 42 assert len(pipelines) == 1 43 G = pipelines[0] 44 stages = networkx.get_node_attributes(G, "stage") 45 46 nodes = [] 47 for n in G.nodes(): 48 stage = stages[n] 49 if commands: 50 if stage.cmd is None: 51 continue 52 nodes.append(stage.cmd) 53 elif outs: 54 for out in stage.outs: 55 nodes.append(str(out)) 56 else: 57 nodes.append(stage.relpath) 58 59 edges = [] 60 for e in G.edges(): 61 from_stage = stages[e[0]] 62 to_stage = stages[e[1]] 63 if commands: 64 if to_stage.cmd is None: 65 continue 66 edges.append((from_stage.cmd, to_stage.cmd)) 67 elif outs: 68 for from_out in from_stage.outs: 69 for to_out in to_stage.outs: 70 edges.append((str(from_out), str(to_out))) 71 else: 72 edges.append((from_stage.relpath, to_stage.relpath)) 73 74 return nodes, edges 75 76 def _show_ascii(self, target, commands, outs): 77 from dvc.dagascii import draw 78 79 nodes, edges = self.__build_graph(target, commands, outs) 80 81 if not nodes: 82 return 83 84 draw(nodes, edges) 85 86 def __write_dot(self, target, commands, outs, filename): 87 import networkx 88 from networkx.drawing.nx_pydot import write_dot 89 90 _, edges = self.__build_graph(target, commands, outs) 91 edges = [edge[::-1] for edge in edges] 92 93 simple_g = networkx.DiGraph() 94 simple_g.add_edges_from(edges) 95 write_dot(simple_g, filename) 96 97 def run(self, unlock=False): 98 if not self.args.targets: 99 self.args.targets = self.default_targets 100 101 for target in self.args.targets: 102 try: 103 if self.args.ascii: 104 self._show_ascii( 105 target, self.args.commands, self.args.outs 106 ) 107 elif self.args.dot: 108 self.__write_dot( 109 target, 110 self.args.commands, 111 self.args.outs, 112 self.args.dot, 113 ) 114 else: 115 self._show(target, self.args.commands, self.args.outs) 116 except DvcException: 117 msg = "failed to show pipeline for '{}'".format(target) 118 logger.error(msg) 119 return 1 120 return 0 121 122 123 class CmdPipelineList(CmdBase): 124 def run(self): 125 import networkx 126 127 pipelines = self.repo.pipelines() 128 for p in pipelines: 129 stages = networkx.get_node_attributes(p, "stage") 130 for stage in stages: 131 logger.info(stage) 132 if len(stages) != 0: 133 logger.info("=" * 80) 134 logger.info("{} pipeline(s) total".format(len(pipelines))) 135 136 return 0 137 138 139 def add_parser(subparsers, parent_parser): 140 PIPELINE_HELP = ( 141 "Manage pipeline.\ndocumentation: https://man.dvc.org/pipeline" 142 ) 143 pipeline_parser = subparsers.add_parser( 144 "pipeline", 145 parents=[parent_parser], 146 description=PIPELINE_HELP, 147 help=PIPELINE_HELP, 148 ) 149 150 pipeline_subparsers = pipeline_parser.add_subparsers( 151 dest="cmd", 152 help="Use dvc pipeline CMD --help for command-specific help.", 153 ) 154 155 fix_subparsers(pipeline_subparsers) 156 157 PIPELINE_SHOW_HELP = "Show pipeline." 158 pipeline_show_parser = pipeline_subparsers.add_parser( 159 "show", 160 parents=[parent_parser], 161 description=PIPELINE_SHOW_HELP, 162 help=PIPELINE_SHOW_HELP, 163 ) 164 pipeline_show_group = pipeline_show_parser.add_mutually_exclusive_group() 165 pipeline_show_group.add_argument( 166 "-c", 167 "--commands", 168 action="store_true", 169 default=False, 170 help="Print commands instead of paths to DVC files.", 171 ) 172 pipeline_show_group.add_argument( 173 "-o", 174 "--outs", 175 action="store_true", 176 default=False, 177 help="Print output files instead of paths to DVC files.", 178 ) 179 pipeline_show_parser.add_argument( 180 "--ascii", 181 action="store_true", 182 default=False, 183 help="Output DAG as ASCII.", 184 ) 185 pipeline_show_parser.add_argument( 186 "--dot", help="Write DAG in .dot format." 187 ) 188 pipeline_show_parser.add_argument( 189 "targets", nargs="*", help="DVC files. 'Dvcfile' by default." 190 ) 191 pipeline_show_parser.set_defaults(func=CmdPipelineShow) 192 193 PIPELINE_LIST_HELP = "List pipelines." 194 pipeline_list_parser = pipeline_subparsers.add_parser( 195 "list", 196 parents=[parent_parser], 197 description=PIPELINE_LIST_HELP, 198 help=PIPELINE_LIST_HELP, 199 ) 200 pipeline_list_parser.set_defaults(func=CmdPipelineList) 201 ``` Path: `setup.py` Content: ``` 1 from setuptools import setup, find_packages 2 from dvc import VERSION 3 4 5 install_requires = [ 6 "ply>=3.9", # See https://github.com/pyinstaller/pyinstaller/issues/1945 7 "configparser>=3.5.0", 8 "zc.lockfile>=1.2.1", 9 "future>=0.16.0", 10 "colorama>=0.3.9", 11 "configobj>=5.0.6", 12 "networkx>=2.1", 13 "pyyaml>=3.12", 14 "gitpython>=2.1.8", 15 "setuptools>=34.0.0", 16 "nanotime>=0.5.2", 17 "pyasn1>=0.4.1", 18 "schema>=0.6.7", 19 "jsonpath-rw==1.4.0", 20 "requests>=2.18.4", 21 "grandalf==0.6", 22 "asciimatics>=1.10.0", 23 "distro>=1.3.0", 24 "appdirs>=1.4.3", 25 ] 26 27 # Extra dependencies for remote integrations 28 gs = ["google-cloud-storage==1.13.0"] 29 s3 = ["boto3==1.9.115"] 30 azure = ["azure-storage-blob==1.3.0"] 31 ssh = ["paramiko>=2.4.1"] 32 all_remotes = gs + s3 + azure + ssh 33 34 setup( 35 name="dvc", 36 version=VERSION, 37 description="Git for data scientists - manage your code and data together", 38 long_description=open("README.rst", "r").read(), 39 author="Dmitry Petrov", 40 author_email="[email protected]", 41 download_url="https://github.com/iterative/dvc", 42 license="Apache License 2.0", 43 install_requires=install_requires, 44 extras_require={ 45 "all": all_remotes, 46 "gs": gs, 47 "s3": s3, 48 "azure": azure, 49 "ssh": ssh, 50 # NOTE: https://github.com/inveniosoftware/troubleshooting/issues/1 51 ':python_version=="2.7"': ["futures"], 52 }, 53 keywords="data science, data version control, machine learning", 54 classifiers=[ 55 "Development Status :: 4 - Beta", 56 "Programming Language :: Python :: 2", 57 "Programming Language :: Python :: 3", 58 ], 59 packages=find_packages(exclude=["tests"]), 60 include_package_data=True, 61 url="http://dataversioncontrol.com", 62 entry_points={"console_scripts": ["dvc = dvc.main:main"]}, 63 zip_safe=False, 64 ) 65 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dvc/command/pipeline.py b/dvc/command/pipeline.py --- a/dvc/command/pipeline.py +++ b/dvc/command/pipeline.py @@ -71,23 +71,47 @@ else: edges.append((from_stage.relpath, to_stage.relpath)) - return nodes, edges + return nodes, edges, networkx.is_tree(G) def _show_ascii(self, target, commands, outs): from dvc.dagascii import draw - nodes, edges = self.__build_graph(target, commands, outs) + nodes, edges, _ = self.__build_graph(target, commands, outs) if not nodes: return draw(nodes, edges) + def _show_dependencies_tree(self, target, commands, outs): + from treelib import Tree + + nodes, edges, is_tree = self.__build_graph(target, commands, outs) + if not nodes: + return + if not is_tree: + raise DvcException( + "DAG is not a tree, can not print it in tree-structure way, " + "please use --ascii instead" + ) + + tree = Tree() + tree.create_node(target, target) # Root node + observe_list = [target] + while len(observe_list) > 0: + current_root = observe_list[0] + for edge in edges: + if edge[0] == current_root: + tree.create_node(edge[1], edge[1], parent=current_root) + observe_list.append(edge[1]) + observe_list.pop(0) + tree.show() + def __write_dot(self, target, commands, outs, filename): import networkx from networkx.drawing.nx_pydot import write_dot - _, edges = self.__build_graph(target, commands, outs) + _, edges, _ = self.__build_graph(target, commands, outs) edges = [edge[::-1] for edge in edges] simple_g = networkx.DiGraph() @@ -111,6 +135,10 @@ self.args.outs, self.args.dot, ) + elif self.args.tree: + self._show_dependencies_tree( + target, self.args.commands, self.args.outs + ) else: self._show(target, self.args.commands, self.args.outs) except DvcException: @@ -185,6 +213,12 @@ pipeline_show_parser.add_argument( "--dot", help="Write DAG in .dot format." ) + pipeline_show_parser.add_argument( + "--tree", + action="store_true", + default=False, + help="Output DAG as Dependencies Tree.", + ) pipeline_show_parser.add_argument( "targets", nargs="*", help="DVC files. 'Dvcfile' by default." ) diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -22,6 +22,7 @@ "asciimatics>=1.10.0", "distro>=1.3.0", "appdirs>=1.4.3", + "treelib>=1.5.5", ] # Extra dependencies for remote integrations
{"golden_diff": "diff --git a/dvc/command/pipeline.py b/dvc/command/pipeline.py\n--- a/dvc/command/pipeline.py\n+++ b/dvc/command/pipeline.py\n@@ -71,23 +71,47 @@\n else:\n edges.append((from_stage.relpath, to_stage.relpath))\n \n- return nodes, edges\n+ return nodes, edges, networkx.is_tree(G)\n \n def _show_ascii(self, target, commands, outs):\n from dvc.dagascii import draw\n \n- nodes, edges = self.__build_graph(target, commands, outs)\n+ nodes, edges, _ = self.__build_graph(target, commands, outs)\n \n if not nodes:\n return\n \n draw(nodes, edges)\n \n+ def _show_dependencies_tree(self, target, commands, outs):\n+ from treelib import Tree\n+\n+ nodes, edges, is_tree = self.__build_graph(target, commands, outs)\n+ if not nodes:\n+ return\n+ if not is_tree:\n+ raise DvcException(\n+ \"DAG is not a tree, can not print it in tree-structure way, \"\n+ \"please use --ascii instead\"\n+ )\n+\n+ tree = Tree()\n+ tree.create_node(target, target) # Root node\n+ observe_list = [target]\n+ while len(observe_list) > 0:\n+ current_root = observe_list[0]\n+ for edge in edges:\n+ if edge[0] == current_root:\n+ tree.create_node(edge[1], edge[1], parent=current_root)\n+ observe_list.append(edge[1])\n+ observe_list.pop(0)\n+ tree.show()\n+\n def __write_dot(self, target, commands, outs, filename):\n import networkx\n from networkx.drawing.nx_pydot import write_dot\n \n- _, edges = self.__build_graph(target, commands, outs)\n+ _, edges, _ = self.__build_graph(target, commands, outs)\n edges = [edge[::-1] for edge in edges]\n \n simple_g = networkx.DiGraph()\n@@ -111,6 +135,10 @@\n self.args.outs,\n self.args.dot,\n )\n+ elif self.args.tree:\n+ self._show_dependencies_tree(\n+ target, self.args.commands, self.args.outs\n+ )\n else:\n self._show(target, self.args.commands, self.args.outs)\n except DvcException:\n@@ -185,6 +213,12 @@\n pipeline_show_parser.add_argument(\n \"--dot\", help=\"Write DAG in .dot format.\"\n )\n+ pipeline_show_parser.add_argument(\n+ \"--tree\",\n+ action=\"store_true\",\n+ default=False,\n+ help=\"Output DAG as Dependencies Tree.\",\n+ )\n pipeline_show_parser.add_argument(\n \"targets\", nargs=\"*\", help=\"DVC files. 'Dvcfile' by default.\"\n )\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -22,6 +22,7 @@\n \"asciimatics>=1.10.0\",\n \"distro>=1.3.0\",\n \"appdirs>=1.4.3\",\n+ \"treelib>=1.5.5\",\n ]\n \n # Extra dependencies for remote integrations\n", "issue": "add `pipeline show --tree`\nUnfold dependencies into a tree same way it is done in npm:\r\n\r\n```\r\n\u2502 \u2502 \u2502 \u2514\u2500\u2500 [email protected]\r\n \u2502 \u2502 \u251c\u2500\u2500 [email protected] deduped\r\n \u2502 \u2502 \u251c\u2500\u2500 [email protected] deduped\r\n \u2502 \u2502 \u251c\u2500\u252c [email protected]\r\n \u2502 \u2502 \u2502 \u251c\u2500\u252c [email protected]\r\n \u2502 \u2502 \u2502 \u2502 \u2514\u2500\u2500 [email protected]\r\n \u2502 \u2502 \u2502 \u251c\u2500\u2500 [email protected]\r\n \u2502 \u2502 \u2502 \u2514\u2500\u2500 [email protected] deduped\r\n \u2502 \u2502 \u251c\u2500\u2500 [email protected] deduped\r\n \u2502 \u2502 \u251c\u2500\u252c [email protected]\r\n \u2502 \u2502 \u2502 \u251c\u2500\u252c [email protected]\r\n \u2502 \u2502 \u2502 \u2502 \u2514\u2500\u252c [email protected]\r\n \u2502 \u2502 \u2502 \u2502 \u2514\u2500\u2500 [email protected]\r\n \u2502 \u2502 \u2502 \u2514\u2500\u252c [email protected]\r\n \u2502 \u2502 \u2502 \u251c\u2500\u2500 [email protected]\r\n \u2502 \u2502 \u2502 \u2514\u2500\u2500 [email protected]\r\n \u2502 \u2502 \u2514\u2500\u2500 [email protected] deduped\r\n \u2502 \u251c\u2500\u252c [email protected]\r\n```\r\n\r\nKudos @shcheklein \n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom dvc.utils.compat import str\n\nimport os\n\nimport dvc.logger as logger\nfrom dvc.exceptions import DvcException\nfrom dvc.command.base import CmdBase, fix_subparsers\n\n\nclass CmdPipelineShow(CmdBase):\n def _show(self, target, commands, outs):\n import networkx\n from dvc.stage import Stage\n\n stage = Stage.load(self.repo, target)\n G = self.repo.graph()[0]\n stages = networkx.get_node_attributes(G, \"stage\")\n node = os.path.relpath(stage.path, self.repo.root_dir)\n\n for n in networkx.dfs_postorder_nodes(G, node):\n if commands:\n logger.info(stages[n].cmd)\n elif outs:\n for out in stages[n].outs:\n logger.info(str(out))\n else:\n logger.info(n)\n\n def __build_graph(self, target, commands, outs):\n import networkx\n from dvc.stage import Stage\n\n stage = Stage.load(self.repo, target)\n node = os.path.relpath(stage.path, self.repo.root_dir)\n\n pipelines = list(\n filter(lambda g: node in g.nodes(), self.repo.pipelines())\n )\n\n assert len(pipelines) == 1\n G = pipelines[0]\n stages = networkx.get_node_attributes(G, \"stage\")\n\n nodes = []\n for n in G.nodes():\n stage = stages[n]\n if commands:\n if stage.cmd is None:\n continue\n nodes.append(stage.cmd)\n elif outs:\n for out in stage.outs:\n nodes.append(str(out))\n else:\n nodes.append(stage.relpath)\n\n edges = []\n for e in G.edges():\n from_stage = stages[e[0]]\n to_stage = stages[e[1]]\n if commands:\n if to_stage.cmd is None:\n continue\n edges.append((from_stage.cmd, to_stage.cmd))\n elif outs:\n for from_out in from_stage.outs:\n for to_out in to_stage.outs:\n edges.append((str(from_out), str(to_out)))\n else:\n edges.append((from_stage.relpath, to_stage.relpath))\n\n return nodes, edges\n\n def _show_ascii(self, target, commands, outs):\n from dvc.dagascii import draw\n\n nodes, edges = self.__build_graph(target, commands, outs)\n\n if not nodes:\n return\n\n draw(nodes, edges)\n\n def __write_dot(self, target, commands, outs, filename):\n import networkx\n from networkx.drawing.nx_pydot import write_dot\n\n _, edges = self.__build_graph(target, commands, outs)\n edges = [edge[::-1] for edge in edges]\n\n simple_g = networkx.DiGraph()\n simple_g.add_edges_from(edges)\n write_dot(simple_g, filename)\n\n def run(self, unlock=False):\n if not self.args.targets:\n self.args.targets = self.default_targets\n\n for target in self.args.targets:\n try:\n if self.args.ascii:\n self._show_ascii(\n target, self.args.commands, self.args.outs\n )\n elif self.args.dot:\n self.__write_dot(\n target,\n self.args.commands,\n self.args.outs,\n self.args.dot,\n )\n else:\n self._show(target, self.args.commands, self.args.outs)\n except DvcException:\n msg = \"failed to show pipeline for '{}'\".format(target)\n logger.error(msg)\n return 1\n return 0\n\n\nclass CmdPipelineList(CmdBase):\n def run(self):\n import networkx\n\n pipelines = self.repo.pipelines()\n for p in pipelines:\n stages = networkx.get_node_attributes(p, \"stage\")\n for stage in stages:\n logger.info(stage)\n if len(stages) != 0:\n logger.info(\"=\" * 80)\n logger.info(\"{} pipeline(s) total\".format(len(pipelines)))\n\n return 0\n\n\ndef add_parser(subparsers, parent_parser):\n PIPELINE_HELP = (\n \"Manage pipeline.\\ndocumentation: https://man.dvc.org/pipeline\"\n )\n pipeline_parser = subparsers.add_parser(\n \"pipeline\",\n parents=[parent_parser],\n description=PIPELINE_HELP,\n help=PIPELINE_HELP,\n )\n\n pipeline_subparsers = pipeline_parser.add_subparsers(\n dest=\"cmd\",\n help=\"Use dvc pipeline CMD --help for command-specific help.\",\n )\n\n fix_subparsers(pipeline_subparsers)\n\n PIPELINE_SHOW_HELP = \"Show pipeline.\"\n pipeline_show_parser = pipeline_subparsers.add_parser(\n \"show\",\n parents=[parent_parser],\n description=PIPELINE_SHOW_HELP,\n help=PIPELINE_SHOW_HELP,\n )\n pipeline_show_group = pipeline_show_parser.add_mutually_exclusive_group()\n pipeline_show_group.add_argument(\n \"-c\",\n \"--commands\",\n action=\"store_true\",\n default=False,\n help=\"Print commands instead of paths to DVC files.\",\n )\n pipeline_show_group.add_argument(\n \"-o\",\n \"--outs\",\n action=\"store_true\",\n default=False,\n help=\"Print output files instead of paths to DVC files.\",\n )\n pipeline_show_parser.add_argument(\n \"--ascii\",\n action=\"store_true\",\n default=False,\n help=\"Output DAG as ASCII.\",\n )\n pipeline_show_parser.add_argument(\n \"--dot\", help=\"Write DAG in .dot format.\"\n )\n pipeline_show_parser.add_argument(\n \"targets\", nargs=\"*\", help=\"DVC files. 'Dvcfile' by default.\"\n )\n pipeline_show_parser.set_defaults(func=CmdPipelineShow)\n\n PIPELINE_LIST_HELP = \"List pipelines.\"\n pipeline_list_parser = pipeline_subparsers.add_parser(\n \"list\",\n parents=[parent_parser],\n description=PIPELINE_LIST_HELP,\n help=PIPELINE_LIST_HELP,\n )\n pipeline_list_parser.set_defaults(func=CmdPipelineList)\n", "path": "dvc/command/pipeline.py"}, {"content": "from setuptools import setup, find_packages\nfrom dvc import VERSION\n\n\ninstall_requires = [\n \"ply>=3.9\", # See https://github.com/pyinstaller/pyinstaller/issues/1945\n \"configparser>=3.5.0\",\n \"zc.lockfile>=1.2.1\",\n \"future>=0.16.0\",\n \"colorama>=0.3.9\",\n \"configobj>=5.0.6\",\n \"networkx>=2.1\",\n \"pyyaml>=3.12\",\n \"gitpython>=2.1.8\",\n \"setuptools>=34.0.0\",\n \"nanotime>=0.5.2\",\n \"pyasn1>=0.4.1\",\n \"schema>=0.6.7\",\n \"jsonpath-rw==1.4.0\",\n \"requests>=2.18.4\",\n \"grandalf==0.6\",\n \"asciimatics>=1.10.0\",\n \"distro>=1.3.0\",\n \"appdirs>=1.4.3\",\n]\n\n# Extra dependencies for remote integrations\ngs = [\"google-cloud-storage==1.13.0\"]\ns3 = [\"boto3==1.9.115\"]\nazure = [\"azure-storage-blob==1.3.0\"]\nssh = [\"paramiko>=2.4.1\"]\nall_remotes = gs + s3 + azure + ssh\n\nsetup(\n name=\"dvc\",\n version=VERSION,\n description=\"Git for data scientists - manage your code and data together\",\n long_description=open(\"README.rst\", \"r\").read(),\n author=\"Dmitry Petrov\",\n author_email=\"[email protected]\",\n download_url=\"https://github.com/iterative/dvc\",\n license=\"Apache License 2.0\",\n install_requires=install_requires,\n extras_require={\n \"all\": all_remotes,\n \"gs\": gs,\n \"s3\": s3,\n \"azure\": azure,\n \"ssh\": ssh,\n # NOTE: https://github.com/inveniosoftware/troubleshooting/issues/1\n ':python_version==\"2.7\"': [\"futures\"],\n },\n keywords=\"data science, data version control, machine learning\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 3\",\n ],\n packages=find_packages(exclude=[\"tests\"]),\n include_package_data=True,\n url=\"http://dataversioncontrol.com\",\n entry_points={\"console_scripts\": [\"dvc = dvc.main:main\"]},\n zip_safe=False,\n)\n", "path": "setup.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nfrom dvc.utils.compat import str\n\nimport os\n\nimport dvc.logger as logger\nfrom dvc.exceptions import DvcException\nfrom dvc.command.base import CmdBase, fix_subparsers\n\n\nclass CmdPipelineShow(CmdBase):\n def _show(self, target, commands, outs):\n import networkx\n from dvc.stage import Stage\n\n stage = Stage.load(self.repo, target)\n G = self.repo.graph()[0]\n stages = networkx.get_node_attributes(G, \"stage\")\n node = os.path.relpath(stage.path, self.repo.root_dir)\n\n for n in networkx.dfs_postorder_nodes(G, node):\n if commands:\n logger.info(stages[n].cmd)\n elif outs:\n for out in stages[n].outs:\n logger.info(str(out))\n else:\n logger.info(n)\n\n def __build_graph(self, target, commands, outs):\n import networkx\n from dvc.stage import Stage\n\n stage = Stage.load(self.repo, target)\n node = os.path.relpath(stage.path, self.repo.root_dir)\n\n pipelines = list(\n filter(lambda g: node in g.nodes(), self.repo.pipelines())\n )\n\n assert len(pipelines) == 1\n G = pipelines[0]\n stages = networkx.get_node_attributes(G, \"stage\")\n\n nodes = []\n for n in G.nodes():\n stage = stages[n]\n if commands:\n if stage.cmd is None:\n continue\n nodes.append(stage.cmd)\n elif outs:\n for out in stage.outs:\n nodes.append(str(out))\n else:\n nodes.append(stage.relpath)\n\n edges = []\n for e in G.edges():\n from_stage = stages[e[0]]\n to_stage = stages[e[1]]\n if commands:\n if to_stage.cmd is None:\n continue\n edges.append((from_stage.cmd, to_stage.cmd))\n elif outs:\n for from_out in from_stage.outs:\n for to_out in to_stage.outs:\n edges.append((str(from_out), str(to_out)))\n else:\n edges.append((from_stage.relpath, to_stage.relpath))\n\n return nodes, edges, networkx.is_tree(G)\n\n def _show_ascii(self, target, commands, outs):\n from dvc.dagascii import draw\n\n nodes, edges, _ = self.__build_graph(target, commands, outs)\n\n if not nodes:\n return\n\n draw(nodes, edges)\n\n def _show_dependencies_tree(self, target, commands, outs):\n from treelib import Tree\n\n nodes, edges, is_tree = self.__build_graph(target, commands, outs)\n if not nodes:\n return\n if not is_tree:\n raise DvcException(\n \"DAG is not a tree, can not print it in tree-structure way, \"\n \"please use --ascii instead\"\n )\n\n tree = Tree()\n tree.create_node(target, target) # Root node\n observe_list = [target]\n while len(observe_list) > 0:\n current_root = observe_list[0]\n for edge in edges:\n if edge[0] == current_root:\n tree.create_node(edge[1], edge[1], parent=current_root)\n observe_list.append(edge[1])\n observe_list.pop(0)\n tree.show()\n\n def __write_dot(self, target, commands, outs, filename):\n import networkx\n from networkx.drawing.nx_pydot import write_dot\n\n _, edges, _ = self.__build_graph(target, commands, outs)\n edges = [edge[::-1] for edge in edges]\n\n simple_g = networkx.DiGraph()\n simple_g.add_edges_from(edges)\n write_dot(simple_g, filename)\n\n def run(self, unlock=False):\n if not self.args.targets:\n self.args.targets = self.default_targets\n\n for target in self.args.targets:\n try:\n if self.args.ascii:\n self._show_ascii(\n target, self.args.commands, self.args.outs\n )\n elif self.args.dot:\n self.__write_dot(\n target,\n self.args.commands,\n self.args.outs,\n self.args.dot,\n )\n elif self.args.tree:\n self._show_dependencies_tree(\n target, self.args.commands, self.args.outs\n )\n else:\n self._show(target, self.args.commands, self.args.outs)\n except DvcException:\n msg = \"failed to show pipeline for '{}'\".format(target)\n logger.error(msg)\n return 1\n return 0\n\n\nclass CmdPipelineList(CmdBase):\n def run(self):\n import networkx\n\n pipelines = self.repo.pipelines()\n for p in pipelines:\n stages = networkx.get_node_attributes(p, \"stage\")\n for stage in stages:\n logger.info(stage)\n if len(stages) != 0:\n logger.info(\"=\" * 80)\n logger.info(\"{} pipeline(s) total\".format(len(pipelines)))\n\n return 0\n\n\ndef add_parser(subparsers, parent_parser):\n PIPELINE_HELP = (\n \"Manage pipeline.\\ndocumentation: https://man.dvc.org/pipeline\"\n )\n pipeline_parser = subparsers.add_parser(\n \"pipeline\",\n parents=[parent_parser],\n description=PIPELINE_HELP,\n help=PIPELINE_HELP,\n )\n\n pipeline_subparsers = pipeline_parser.add_subparsers(\n dest=\"cmd\",\n help=\"Use dvc pipeline CMD --help for command-specific help.\",\n )\n\n fix_subparsers(pipeline_subparsers)\n\n PIPELINE_SHOW_HELP = \"Show pipeline.\"\n pipeline_show_parser = pipeline_subparsers.add_parser(\n \"show\",\n parents=[parent_parser],\n description=PIPELINE_SHOW_HELP,\n help=PIPELINE_SHOW_HELP,\n )\n pipeline_show_group = pipeline_show_parser.add_mutually_exclusive_group()\n pipeline_show_group.add_argument(\n \"-c\",\n \"--commands\",\n action=\"store_true\",\n default=False,\n help=\"Print commands instead of paths to DVC files.\",\n )\n pipeline_show_group.add_argument(\n \"-o\",\n \"--outs\",\n action=\"store_true\",\n default=False,\n help=\"Print output files instead of paths to DVC files.\",\n )\n pipeline_show_parser.add_argument(\n \"--ascii\",\n action=\"store_true\",\n default=False,\n help=\"Output DAG as ASCII.\",\n )\n pipeline_show_parser.add_argument(\n \"--dot\", help=\"Write DAG in .dot format.\"\n )\n pipeline_show_parser.add_argument(\n \"--tree\",\n action=\"store_true\",\n default=False,\n help=\"Output DAG as Dependencies Tree.\",\n )\n pipeline_show_parser.add_argument(\n \"targets\", nargs=\"*\", help=\"DVC files. 'Dvcfile' by default.\"\n )\n pipeline_show_parser.set_defaults(func=CmdPipelineShow)\n\n PIPELINE_LIST_HELP = \"List pipelines.\"\n pipeline_list_parser = pipeline_subparsers.add_parser(\n \"list\",\n parents=[parent_parser],\n description=PIPELINE_LIST_HELP,\n help=PIPELINE_LIST_HELP,\n )\n pipeline_list_parser.set_defaults(func=CmdPipelineList)\n", "path": "dvc/command/pipeline.py"}, {"content": "from setuptools import setup, find_packages\nfrom dvc import VERSION\n\n\ninstall_requires = [\n \"ply>=3.9\", # See https://github.com/pyinstaller/pyinstaller/issues/1945\n \"configparser>=3.5.0\",\n \"zc.lockfile>=1.2.1\",\n \"future>=0.16.0\",\n \"colorama>=0.3.9\",\n \"configobj>=5.0.6\",\n \"networkx>=2.1\",\n \"pyyaml>=3.12\",\n \"gitpython>=2.1.8\",\n \"setuptools>=34.0.0\",\n \"nanotime>=0.5.2\",\n \"pyasn1>=0.4.1\",\n \"schema>=0.6.7\",\n \"jsonpath-rw==1.4.0\",\n \"requests>=2.18.4\",\n \"grandalf==0.6\",\n \"asciimatics>=1.10.0\",\n \"distro>=1.3.0\",\n \"appdirs>=1.4.3\",\n \"treelib>=1.5.5\",\n]\n\n# Extra dependencies for remote integrations\ngs = [\"google-cloud-storage==1.13.0\"]\ns3 = [\"boto3==1.9.115\"]\nazure = [\"azure-storage-blob==1.3.0\"]\nssh = [\"paramiko>=2.4.1\"]\nall_remotes = gs + s3 + azure + ssh\n\nsetup(\n name=\"dvc\",\n version=VERSION,\n description=\"Git for data scientists - manage your code and data together\",\n long_description=open(\"README.rst\", \"r\").read(),\n author=\"Dmitry Petrov\",\n author_email=\"[email protected]\",\n download_url=\"https://github.com/iterative/dvc\",\n license=\"Apache License 2.0\",\n install_requires=install_requires,\n extras_require={\n \"all\": all_remotes,\n \"gs\": gs,\n \"s3\": s3,\n \"azure\": azure,\n \"ssh\": ssh,\n # NOTE: https://github.com/inveniosoftware/troubleshooting/issues/1\n ':python_version==\"2.7\"': [\"futures\"],\n },\n keywords=\"data science, data version control, machine learning\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 3\",\n ],\n packages=find_packages(exclude=[\"tests\"]),\n include_package_data=True,\n url=\"http://dataversioncontrol.com\",\n entry_points={\"console_scripts\": [\"dvc = dvc.main:main\"]},\n zip_safe=False,\n)\n", "path": "setup.py"}]}
3,113
738
gh_patches_debug_21093
rasdani/github-patches
git_diff
liqd__adhocracy4-294
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- IntegrityError in ratings See: - https://sentry.liqd.net/sentry/debattenportalspdde/issues/84/ - https://sentry.liqd.net/sentry/meinberlin-prod/issues/5/ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `adhocracy4/ratings/api.py` Content: ``` 1 from django.conf import settings 2 3 from django_filters import rest_framework as filters 4 from rest_framework import mixins, viewsets 5 from rest_framework.response import Response 6 7 from adhocracy4.api.mixins import ContentTypeMixin 8 from adhocracy4.api.permissions import ViewSetRulesPermission 9 10 from .models import Rating 11 from .serializers import RatingSerializer 12 13 14 class RatingViewSet(mixins.CreateModelMixin, 15 mixins.UpdateModelMixin, 16 ContentTypeMixin, 17 viewsets.GenericViewSet): 18 19 queryset = Rating.objects.all() 20 serializer_class = RatingSerializer 21 permission_classes = (ViewSetRulesPermission,) 22 filter_backends = (filters.DjangoFilterBackend,) 23 filter_fields = ('object_pk', 'content_type') 24 content_type_filter = settings.A4_RATEABLES 25 26 def perform_create(self, serializer): 27 serializer.save( 28 content_object=self.content_object, 29 creator=self.request.user 30 ) 31 32 def get_permission_object(self): 33 return self.content_object 34 35 @property 36 def rules_method_map(self): 37 return ViewSetRulesPermission.default_rules_method_map._replace( 38 POST='{app_label}.rate_{model}'.format( 39 app_label=self.content_type.app_label, 40 model=self.content_type.model 41 ) 42 ) 43 44 def destroy(self, request, content_type, object_pk, pk=None): 45 """ 46 Sets value to zero 47 NOTE: Rating is NOT deleted. 48 """ 49 rating = self.get_object() 50 rating.update(0) 51 serializer = self.get_serializer(rating) 52 return Response(serializer.data) 53 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/adhocracy4/ratings/api.py b/adhocracy4/ratings/api.py --- a/adhocracy4/ratings/api.py +++ b/adhocracy4/ratings/api.py @@ -3,6 +3,7 @@ from django_filters import rest_framework as filters from rest_framework import mixins, viewsets from rest_framework.response import Response +from rest_framework.serializers import ValidationError from adhocracy4.api.mixins import ContentTypeMixin from adhocracy4.api.permissions import ViewSetRulesPermission @@ -24,6 +25,11 @@ content_type_filter = settings.A4_RATEABLES def perform_create(self, serializer): + queryset = Rating.objects.filter(content_type_id=self.content_type.pk, + creator=self.request.user, + object_pk=self.content_object.pk) + if queryset.exists(): + raise ValidationError(queryset[0].pk) serializer.save( content_object=self.content_object, creator=self.request.user
{"golden_diff": "diff --git a/adhocracy4/ratings/api.py b/adhocracy4/ratings/api.py\n--- a/adhocracy4/ratings/api.py\n+++ b/adhocracy4/ratings/api.py\n@@ -3,6 +3,7 @@\n from django_filters import rest_framework as filters\n from rest_framework import mixins, viewsets\n from rest_framework.response import Response\n+from rest_framework.serializers import ValidationError\n \n from adhocracy4.api.mixins import ContentTypeMixin\n from adhocracy4.api.permissions import ViewSetRulesPermission\n@@ -24,6 +25,11 @@\n content_type_filter = settings.A4_RATEABLES\n \n def perform_create(self, serializer):\n+ queryset = Rating.objects.filter(content_type_id=self.content_type.pk,\n+ creator=self.request.user,\n+ object_pk=self.content_object.pk)\n+ if queryset.exists():\n+ raise ValidationError(queryset[0].pk)\n serializer.save(\n content_object=self.content_object,\n creator=self.request.user\n", "issue": "IntegrityError in ratings\nSee:\r\n- https://sentry.liqd.net/sentry/debattenportalspdde/issues/84/\r\n- https://sentry.liqd.net/sentry/meinberlin-prod/issues/5/\n", "before_files": [{"content": "from django.conf import settings\n\nfrom django_filters import rest_framework as filters\nfrom rest_framework import mixins, viewsets\nfrom rest_framework.response import Response\n\nfrom adhocracy4.api.mixins import ContentTypeMixin\nfrom adhocracy4.api.permissions import ViewSetRulesPermission\n\nfrom .models import Rating\nfrom .serializers import RatingSerializer\n\n\nclass RatingViewSet(mixins.CreateModelMixin,\n mixins.UpdateModelMixin,\n ContentTypeMixin,\n viewsets.GenericViewSet):\n\n queryset = Rating.objects.all()\n serializer_class = RatingSerializer\n permission_classes = (ViewSetRulesPermission,)\n filter_backends = (filters.DjangoFilterBackend,)\n filter_fields = ('object_pk', 'content_type')\n content_type_filter = settings.A4_RATEABLES\n\n def perform_create(self, serializer):\n serializer.save(\n content_object=self.content_object,\n creator=self.request.user\n )\n\n def get_permission_object(self):\n return self.content_object\n\n @property\n def rules_method_map(self):\n return ViewSetRulesPermission.default_rules_method_map._replace(\n POST='{app_label}.rate_{model}'.format(\n app_label=self.content_type.app_label,\n model=self.content_type.model\n )\n )\n\n def destroy(self, request, content_type, object_pk, pk=None):\n \"\"\"\n Sets value to zero\n NOTE: Rating is NOT deleted.\n \"\"\"\n rating = self.get_object()\n rating.update(0)\n serializer = self.get_serializer(rating)\n return Response(serializer.data)\n", "path": "adhocracy4/ratings/api.py"}], "after_files": [{"content": "from django.conf import settings\n\nfrom django_filters import rest_framework as filters\nfrom rest_framework import mixins, viewsets\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import ValidationError\n\nfrom adhocracy4.api.mixins import ContentTypeMixin\nfrom adhocracy4.api.permissions import ViewSetRulesPermission\n\nfrom .models import Rating\nfrom .serializers import RatingSerializer\n\n\nclass RatingViewSet(mixins.CreateModelMixin,\n mixins.UpdateModelMixin,\n ContentTypeMixin,\n viewsets.GenericViewSet):\n\n queryset = Rating.objects.all()\n serializer_class = RatingSerializer\n permission_classes = (ViewSetRulesPermission,)\n filter_backends = (filters.DjangoFilterBackend,)\n filter_fields = ('object_pk', 'content_type')\n content_type_filter = settings.A4_RATEABLES\n\n def perform_create(self, serializer):\n queryset = Rating.objects.filter(content_type_id=self.content_type.pk,\n creator=self.request.user,\n object_pk=self.content_object.pk)\n if queryset.exists():\n raise ValidationError(queryset[0].pk)\n serializer.save(\n content_object=self.content_object,\n creator=self.request.user\n )\n\n def get_permission_object(self):\n return self.content_object\n\n @property\n def rules_method_map(self):\n return ViewSetRulesPermission.default_rules_method_map._replace(\n POST='{app_label}.rate_{model}'.format(\n app_label=self.content_type.app_label,\n model=self.content_type.model\n )\n )\n\n def destroy(self, request, content_type, object_pk, pk=None):\n \"\"\"\n Sets value to zero\n NOTE: Rating is NOT deleted.\n \"\"\"\n rating = self.get_object()\n rating.update(0)\n serializer = self.get_serializer(rating)\n return Response(serializer.data)\n", "path": "adhocracy4/ratings/api.py"}]}
731
211
gh_patches_debug_2458
rasdani/github-patches
git_diff
Parsl__parsl-186
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Allow `DataFuture` to be initialized with a `str` file object [Here](https://github.com/Parsl/parsl/blob/master/parsl/app/futures.py#L77) we check if `file_obj` is `str`. Now that `File` is subclassed from `str`, this will always evaluate as `True`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `parsl/app/futures.py` Content: ``` 1 """This module implements DataFutures. 2 3 We have two basic types of futures: 4 1. DataFutures which represent data objects 5 2. AppFutures which represent the futures on App/Leaf tasks. 6 """ 7 import os 8 import logging 9 from concurrent.futures import Future 10 11 from parsl.dataflow.futures import AppFuture 12 from parsl.app.errors import * 13 from parsl.data_provider.files import File 14 15 logger = logging.getLogger(__name__) 16 17 # Possible future states (for internal use by the futures package). 18 PENDING = 'PENDING' 19 RUNNING = 'RUNNING' 20 # The future was cancelled by the user... 21 CANCELLED = 'CANCELLED' 22 # ...and _Waiter.add_cancelled() was called by a worker. 23 CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED' 24 FINISHED = 'FINISHED' 25 26 _STATE_TO_DESCRIPTION_MAP = { 27 PENDING: "pending", 28 RUNNING: "running", 29 CANCELLED: "cancelled", 30 CANCELLED_AND_NOTIFIED: "cancelled", 31 FINISHED: "finished" 32 } 33 34 35 class DataFuture(Future): 36 """A datafuture points at an AppFuture. 37 38 We are simply wrapping a AppFuture, and adding the specific case where, if 39 the future is resolved i.e file exists, then the DataFuture is assumed to be 40 resolved. 41 """ 42 43 def parent_callback(self, parent_fu): 44 """Callback from executor future to update the parent. 45 46 Args: 47 - parent_fu (Future): Future returned by the executor along with callback 48 49 Returns: 50 - None 51 52 Updates the super() with the result() or exception() 53 """ 54 if parent_fu.done() is True: 55 e = parent_fu._exception 56 if e: 57 super().set_exception(e) 58 else: 59 super().set_result(parent_fu.result()) 60 return 61 62 def __init__(self, fut, file_obj, parent=None, tid=None): 63 """Construct the DataFuture object. 64 65 If the file_obj is a string convert to a File. 66 67 Args: 68 - fut (AppFuture) : AppFuture that this DataFuture will track 69 - file_obj (string/File obj) : Something representing file(s) 70 71 Kwargs: 72 - parent () 73 - tid (task_id) : Task id that this DataFuture tracks 74 """ 75 super().__init__() 76 self._tid = tid 77 if isinstance(file_obj, str): 78 self.file_obj = File(file_obj) 79 else: 80 self.file_obj = file_obj 81 self.parent = parent 82 self._exception = None 83 84 if fut is None: 85 logger.debug("Setting result to filepath since no future was passed") 86 self.set_result = self.file_obj 87 88 else: 89 if isinstance(fut, Future): 90 self.parent = fut 91 self.parent.add_done_callback(self.parent_callback) 92 else: 93 raise NotFutureError("DataFuture can be created only with a FunctionFuture on None") 94 95 logger.debug("Creating DataFuture with parent : %s", parent) 96 logger.debug("Filepath : %s", self.filepath) 97 98 @property 99 def tid(self): 100 """Returns the task_id of the task that will resolve this DataFuture.""" 101 return self._tid 102 103 @property 104 def filepath(self): 105 """Filepath of the File object this datafuture represents.""" 106 return self.file_obj.filepath 107 108 @property 109 def filename(self): 110 """Filepath of the File object this datafuture represents.""" 111 return self.filepath 112 113 def result(self, timeout=None): 114 """A blocking call that returns either the result or raises an exception. 115 116 Assumptions : A DataFuture always has a parent AppFuture. The AppFuture does callbacks when 117 setup. 118 119 Kwargs: 120 - timeout (int): Timeout in seconds 121 122 Returns: 123 - If App completed successfully returns the filepath. 124 125 Raises: 126 - Exception raised by app if failed. 127 128 """ 129 if self.parent: 130 if self.parent.done(): 131 # This explicit call to raise exceptions might be redundant. 132 # the result() call *should* raise an exception if there's one 133 e = self.parent._exception 134 if e: 135 raise e 136 else: 137 self.parent.result(timeout=timeout) 138 else: 139 self.parent.result(timeout=timeout) 140 141 return self.file_obj 142 143 def cancel(self): 144 """Cancel the task that this DataFuture is tracking. 145 146 Note: This may not work 147 """ 148 if self.parent: 149 return self.parent.cancel 150 else: 151 return False 152 153 def cancelled(self): 154 if self.parent: 155 return self.parent.cancelled() 156 else: 157 return False 158 159 def running(self): 160 if self.parent: 161 return self.parent.running() 162 else: 163 return False 164 165 def done(self): 166 if self.parent: 167 return self.parent.done() 168 else: 169 return True 170 171 def exception(self, timeout=None): 172 if self.parent: 173 return self.parent.exception(timeout=timeout) 174 else: 175 return True 176 177 def add_done_callback(self, fn): 178 if self.parent: 179 return self.parent.add_done_callback(fn) 180 else: 181 return None 182 183 def __repr__(self): 184 185 # The DataFuture could be wrapping an AppFuture whose parent is a Future 186 # check to find the top level parent 187 if isinstance(self.parent, AppFuture): 188 parent = self.parent.parent 189 else: 190 parent = self.parent 191 192 if parent: 193 with parent._condition: 194 if parent._state == FINISHED: 195 if parent._exception: 196 return '<%s at %#x state=%s raised %s>' % ( 197 self.__class__.__name__, 198 id(self), 199 _STATE_TO_DESCRIPTION_MAP[parent._state], 200 parent._exception.__class__.__name__) 201 else: 202 return '<%s at %#x state=%s returned %s>' % ( 203 self.__class__.__name__, 204 id(self), 205 _STATE_TO_DESCRIPTION_MAP[parent._state], 206 self.filepath + '_file') 207 return '<%s at %#x state=%s>' % ( 208 self.__class__.__name__, 209 id(self), 210 _STATE_TO_DESCRIPTION_MAP[parent._state]) 211 212 else: 213 return '<%s at %#x state=%s>' % ( 214 self.__class__.__name__, 215 id(self), 216 _STATE_TO_DESCRIPTION_MAP[self._state]) 217 218 219 def testing_nonfuture(): 220 fpath = '~/shuffled.txt' 221 df = DataFuture(None, fpath) 222 print(df) 223 print("Result : ", df.filepath) 224 assert df.filepath == os.path.abspath(os.path.expanduser(fpath)) 225 226 227 if __name__ == "__main__": 228 # logging.basicConfig(filename='futures.testing.log',level=logging.DEBUG) 229 import sys 230 import random 231 logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) 232 logger.debug("Begin Testing") 233 234 with open('shuffled.txt', 'w') as testfile: 235 nums = list(range(0, 10000)) 236 random.shuffle(nums) 237 for item in nums: 238 testfile.write("{0}\n".format(item)) 239 240 foo = Future() 241 df = DataFuture(foo, './shuffled.txt') 242 dx = DataFuture(foo, '~/shuffled.txt') 243 244 print(foo.done()) 245 print(df.done()) 246 247 testing_nonfuture() 248 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/parsl/app/futures.py b/parsl/app/futures.py --- a/parsl/app/futures.py +++ b/parsl/app/futures.py @@ -74,7 +74,7 @@ """ super().__init__() self._tid = tid - if isinstance(file_obj, str): + if isinstance(file_obj, str) and not isinstance(file_obj, File): self.file_obj = File(file_obj) else: self.file_obj = file_obj
{"golden_diff": "diff --git a/parsl/app/futures.py b/parsl/app/futures.py\n--- a/parsl/app/futures.py\n+++ b/parsl/app/futures.py\n@@ -74,7 +74,7 @@\n \"\"\"\n super().__init__()\n self._tid = tid\n- if isinstance(file_obj, str):\n+ if isinstance(file_obj, str) and not isinstance(file_obj, File):\n self.file_obj = File(file_obj)\n else:\n self.file_obj = file_obj\n", "issue": "Allow `DataFuture` to be initialized with a `str` file object\n[Here](https://github.com/Parsl/parsl/blob/master/parsl/app/futures.py#L77) we check if `file_obj` is `str`. Now that `File` is subclassed from `str`, this will always evaluate as `True`.\n", "before_files": [{"content": "\"\"\"This module implements DataFutures.\n\nWe have two basic types of futures:\n 1. DataFutures which represent data objects\n 2. AppFutures which represent the futures on App/Leaf tasks.\n\"\"\"\nimport os\nimport logging\nfrom concurrent.futures import Future\n\nfrom parsl.dataflow.futures import AppFuture\nfrom parsl.app.errors import *\nfrom parsl.data_provider.files import File\n\nlogger = logging.getLogger(__name__)\n\n# Possible future states (for internal use by the futures package).\nPENDING = 'PENDING'\nRUNNING = 'RUNNING'\n# The future was cancelled by the user...\nCANCELLED = 'CANCELLED'\n# ...and _Waiter.add_cancelled() was called by a worker.\nCANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'\nFINISHED = 'FINISHED'\n\n_STATE_TO_DESCRIPTION_MAP = {\n PENDING: \"pending\",\n RUNNING: \"running\",\n CANCELLED: \"cancelled\",\n CANCELLED_AND_NOTIFIED: \"cancelled\",\n FINISHED: \"finished\"\n}\n\n\nclass DataFuture(Future):\n \"\"\"A datafuture points at an AppFuture.\n\n We are simply wrapping a AppFuture, and adding the specific case where, if\n the future is resolved i.e file exists, then the DataFuture is assumed to be\n resolved.\n \"\"\"\n\n def parent_callback(self, parent_fu):\n \"\"\"Callback from executor future to update the parent.\n\n Args:\n - parent_fu (Future): Future returned by the executor along with callback\n\n Returns:\n - None\n\n Updates the super() with the result() or exception()\n \"\"\"\n if parent_fu.done() is True:\n e = parent_fu._exception\n if e:\n super().set_exception(e)\n else:\n super().set_result(parent_fu.result())\n return\n\n def __init__(self, fut, file_obj, parent=None, tid=None):\n \"\"\"Construct the DataFuture object.\n\n If the file_obj is a string convert to a File.\n\n Args:\n - fut (AppFuture) : AppFuture that this DataFuture will track\n - file_obj (string/File obj) : Something representing file(s)\n\n Kwargs:\n - parent ()\n - tid (task_id) : Task id that this DataFuture tracks\n \"\"\"\n super().__init__()\n self._tid = tid\n if isinstance(file_obj, str):\n self.file_obj = File(file_obj)\n else:\n self.file_obj = file_obj\n self.parent = parent\n self._exception = None\n\n if fut is None:\n logger.debug(\"Setting result to filepath since no future was passed\")\n self.set_result = self.file_obj\n\n else:\n if isinstance(fut, Future):\n self.parent = fut\n self.parent.add_done_callback(self.parent_callback)\n else:\n raise NotFutureError(\"DataFuture can be created only with a FunctionFuture on None\")\n\n logger.debug(\"Creating DataFuture with parent : %s\", parent)\n logger.debug(\"Filepath : %s\", self.filepath)\n\n @property\n def tid(self):\n \"\"\"Returns the task_id of the task that will resolve this DataFuture.\"\"\"\n return self._tid\n\n @property\n def filepath(self):\n \"\"\"Filepath of the File object this datafuture represents.\"\"\"\n return self.file_obj.filepath\n\n @property\n def filename(self):\n \"\"\"Filepath of the File object this datafuture represents.\"\"\"\n return self.filepath\n\n def result(self, timeout=None):\n \"\"\"A blocking call that returns either the result or raises an exception.\n\n Assumptions : A DataFuture always has a parent AppFuture. The AppFuture does callbacks when\n setup.\n\n Kwargs:\n - timeout (int): Timeout in seconds\n\n Returns:\n - If App completed successfully returns the filepath.\n\n Raises:\n - Exception raised by app if failed.\n\n \"\"\"\n if self.parent:\n if self.parent.done():\n # This explicit call to raise exceptions might be redundant.\n # the result() call *should* raise an exception if there's one\n e = self.parent._exception\n if e:\n raise e\n else:\n self.parent.result(timeout=timeout)\n else:\n self.parent.result(timeout=timeout)\n\n return self.file_obj\n\n def cancel(self):\n \"\"\"Cancel the task that this DataFuture is tracking.\n\n Note: This may not work\n \"\"\"\n if self.parent:\n return self.parent.cancel\n else:\n return False\n\n def cancelled(self):\n if self.parent:\n return self.parent.cancelled()\n else:\n return False\n\n def running(self):\n if self.parent:\n return self.parent.running()\n else:\n return False\n\n def done(self):\n if self.parent:\n return self.parent.done()\n else:\n return True\n\n def exception(self, timeout=None):\n if self.parent:\n return self.parent.exception(timeout=timeout)\n else:\n return True\n\n def add_done_callback(self, fn):\n if self.parent:\n return self.parent.add_done_callback(fn)\n else:\n return None\n\n def __repr__(self):\n\n # The DataFuture could be wrapping an AppFuture whose parent is a Future\n # check to find the top level parent\n if isinstance(self.parent, AppFuture):\n parent = self.parent.parent\n else:\n parent = self.parent\n\n if parent:\n with parent._condition:\n if parent._state == FINISHED:\n if parent._exception:\n return '<%s at %#x state=%s raised %s>' % (\n self.__class__.__name__,\n id(self),\n _STATE_TO_DESCRIPTION_MAP[parent._state],\n parent._exception.__class__.__name__)\n else:\n return '<%s at %#x state=%s returned %s>' % (\n self.__class__.__name__,\n id(self),\n _STATE_TO_DESCRIPTION_MAP[parent._state],\n self.filepath + '_file')\n return '<%s at %#x state=%s>' % (\n self.__class__.__name__,\n id(self),\n _STATE_TO_DESCRIPTION_MAP[parent._state])\n\n else:\n return '<%s at %#x state=%s>' % (\n self.__class__.__name__,\n id(self),\n _STATE_TO_DESCRIPTION_MAP[self._state])\n\n\ndef testing_nonfuture():\n fpath = '~/shuffled.txt'\n df = DataFuture(None, fpath)\n print(df)\n print(\"Result : \", df.filepath)\n assert df.filepath == os.path.abspath(os.path.expanduser(fpath))\n\n\nif __name__ == \"__main__\":\n # logging.basicConfig(filename='futures.testing.log',level=logging.DEBUG)\n import sys\n import random\n logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n logger.debug(\"Begin Testing\")\n\n with open('shuffled.txt', 'w') as testfile:\n nums = list(range(0, 10000))\n random.shuffle(nums)\n for item in nums:\n testfile.write(\"{0}\\n\".format(item))\n\n foo = Future()\n df = DataFuture(foo, './shuffled.txt')\n dx = DataFuture(foo, '~/shuffled.txt')\n\n print(foo.done())\n print(df.done())\n\n testing_nonfuture()\n", "path": "parsl/app/futures.py"}], "after_files": [{"content": "\"\"\"This module implements DataFutures.\n\nWe have two basic types of futures:\n 1. DataFutures which represent data objects\n 2. AppFutures which represent the futures on App/Leaf tasks.\n\"\"\"\nimport os\nimport logging\nfrom concurrent.futures import Future\n\nfrom parsl.dataflow.futures import AppFuture\nfrom parsl.app.errors import *\nfrom parsl.data_provider.files import File\n\nlogger = logging.getLogger(__name__)\n\n# Possible future states (for internal use by the futures package).\nPENDING = 'PENDING'\nRUNNING = 'RUNNING'\n# The future was cancelled by the user...\nCANCELLED = 'CANCELLED'\n# ...and _Waiter.add_cancelled() was called by a worker.\nCANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'\nFINISHED = 'FINISHED'\n\n_STATE_TO_DESCRIPTION_MAP = {\n PENDING: \"pending\",\n RUNNING: \"running\",\n CANCELLED: \"cancelled\",\n CANCELLED_AND_NOTIFIED: \"cancelled\",\n FINISHED: \"finished\"\n}\n\n\nclass DataFuture(Future):\n \"\"\"A datafuture points at an AppFuture.\n\n We are simply wrapping a AppFuture, and adding the specific case where, if\n the future is resolved i.e file exists, then the DataFuture is assumed to be\n resolved.\n \"\"\"\n\n def parent_callback(self, parent_fu):\n \"\"\"Callback from executor future to update the parent.\n\n Args:\n - parent_fu (Future): Future returned by the executor along with callback\n\n Returns:\n - None\n\n Updates the super() with the result() or exception()\n \"\"\"\n if parent_fu.done() is True:\n e = parent_fu._exception\n if e:\n super().set_exception(e)\n else:\n super().set_result(parent_fu.result())\n return\n\n def __init__(self, fut, file_obj, parent=None, tid=None):\n \"\"\"Construct the DataFuture object.\n\n If the file_obj is a string convert to a File.\n\n Args:\n - fut (AppFuture) : AppFuture that this DataFuture will track\n - file_obj (string/File obj) : Something representing file(s)\n\n Kwargs:\n - parent ()\n - tid (task_id) : Task id that this DataFuture tracks\n \"\"\"\n super().__init__()\n self._tid = tid\n if isinstance(file_obj, str) and not isinstance(file_obj, File):\n self.file_obj = File(file_obj)\n else:\n self.file_obj = file_obj\n self.parent = parent\n self._exception = None\n\n if fut is None:\n logger.debug(\"Setting result to filepath since no future was passed\")\n self.set_result = self.file_obj\n\n else:\n if isinstance(fut, Future):\n self.parent = fut\n self.parent.add_done_callback(self.parent_callback)\n else:\n raise NotFutureError(\"DataFuture can be created only with a FunctionFuture on None\")\n\n logger.debug(\"Creating DataFuture with parent : %s\", parent)\n logger.debug(\"Filepath : %s\", self.filepath)\n\n @property\n def tid(self):\n \"\"\"Returns the task_id of the task that will resolve this DataFuture.\"\"\"\n return self._tid\n\n @property\n def filepath(self):\n \"\"\"Filepath of the File object this datafuture represents.\"\"\"\n return self.file_obj.filepath\n\n @property\n def filename(self):\n \"\"\"Filepath of the File object this datafuture represents.\"\"\"\n return self.filepath\n\n def result(self, timeout=None):\n \"\"\"A blocking call that returns either the result or raises an exception.\n\n Assumptions : A DataFuture always has a parent AppFuture. The AppFuture does callbacks when\n setup.\n\n Kwargs:\n - timeout (int): Timeout in seconds\n\n Returns:\n - If App completed successfully returns the filepath.\n\n Raises:\n - Exception raised by app if failed.\n\n \"\"\"\n if self.parent:\n if self.parent.done():\n # This explicit call to raise exceptions might be redundant.\n # the result() call *should* raise an exception if there's one\n e = self.parent._exception\n if e:\n raise e\n else:\n self.parent.result(timeout=timeout)\n else:\n self.parent.result(timeout=timeout)\n\n return self.file_obj\n\n def cancel(self):\n \"\"\"Cancel the task that this DataFuture is tracking.\n\n Note: This may not work\n \"\"\"\n if self.parent:\n return self.parent.cancel\n else:\n return False\n\n def cancelled(self):\n if self.parent:\n return self.parent.cancelled()\n else:\n return False\n\n def running(self):\n if self.parent:\n return self.parent.running()\n else:\n return False\n\n def done(self):\n if self.parent:\n return self.parent.done()\n else:\n return True\n\n def exception(self, timeout=None):\n if self.parent:\n return self.parent.exception(timeout=timeout)\n else:\n return True\n\n def add_done_callback(self, fn):\n if self.parent:\n return self.parent.add_done_callback(fn)\n else:\n return None\n\n def __repr__(self):\n\n # The DataFuture could be wrapping an AppFuture whose parent is a Future\n # check to find the top level parent\n if isinstance(self.parent, AppFuture):\n parent = self.parent.parent\n else:\n parent = self.parent\n\n if parent:\n with parent._condition:\n if parent._state == FINISHED:\n if parent._exception:\n return '<%s at %#x state=%s raised %s>' % (\n self.__class__.__name__,\n id(self),\n _STATE_TO_DESCRIPTION_MAP[parent._state],\n parent._exception.__class__.__name__)\n else:\n return '<%s at %#x state=%s returned %s>' % (\n self.__class__.__name__,\n id(self),\n _STATE_TO_DESCRIPTION_MAP[parent._state],\n self.filepath + '_file')\n return '<%s at %#x state=%s>' % (\n self.__class__.__name__,\n id(self),\n _STATE_TO_DESCRIPTION_MAP[parent._state])\n\n else:\n return '<%s at %#x state=%s>' % (\n self.__class__.__name__,\n id(self),\n _STATE_TO_DESCRIPTION_MAP[self._state])\n\n\ndef testing_nonfuture():\n fpath = '~/shuffled.txt'\n df = DataFuture(None, fpath)\n print(df)\n print(\"Result : \", df.filepath)\n assert df.filepath == os.path.abspath(os.path.expanduser(fpath))\n\n\nif __name__ == \"__main__\":\n # logging.basicConfig(filename='futures.testing.log',level=logging.DEBUG)\n import sys\n import random\n logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n logger.debug(\"Begin Testing\")\n\n with open('shuffled.txt', 'w') as testfile:\n nums = list(range(0, 10000))\n random.shuffle(nums)\n for item in nums:\n testfile.write(\"{0}\\n\".format(item))\n\n foo = Future()\n df = DataFuture(foo, './shuffled.txt')\n dx = DataFuture(foo, '~/shuffled.txt')\n\n print(foo.done())\n print(df.done())\n\n testing_nonfuture()\n", "path": "parsl/app/futures.py"}]}
2,549
112
gh_patches_debug_33525
rasdani/github-patches
git_diff
ivy-llc__ivy-26087
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- blackman_window --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ivy/functional/backends/jax/experimental/creation.py` Content: ``` 1 # global 2 from typing import Optional, Tuple 3 import math 4 import jax 5 import jax.numpy as jnp 6 import jaxlib.xla_extension 7 8 # local 9 from ivy.functional.backends.jax import JaxArray 10 import ivy 11 12 # Array API Standard # 13 # ------------------ # 14 15 16 def vorbis_window( 17 window_length: JaxArray, 18 *, 19 dtype: jnp.dtype = jnp.float32, 20 out: Optional[JaxArray] = None, 21 ) -> JaxArray: 22 return jnp.array( 23 [ 24 round( 25 math.sin( 26 (ivy.pi / 2) * (math.sin(ivy.pi * (i) / (window_length * 2)) ** 2) 27 ), 28 8, 29 ) 30 for i in range(1, window_length * 2)[0::2] 31 ], 32 dtype=dtype, 33 ) 34 35 36 def hann_window( 37 size: int, 38 /, 39 *, 40 periodic: bool = True, 41 dtype: Optional[jnp.dtype] = None, 42 out: Optional[JaxArray] = None, 43 ) -> JaxArray: 44 if size < 2: 45 return jnp.ones([size], dtype=dtype) 46 if periodic: 47 count = jnp.arange(size) / size 48 else: 49 count = jnp.linspace(start=0, stop=size, num=size) 50 return (0.5 - 0.5 * jnp.cos(2 * jnp.pi * count)).astype(dtype) 51 52 53 def kaiser_window( 54 window_length: int, 55 periodic: bool = True, 56 beta: float = 12.0, 57 *, 58 dtype: Optional[jnp.dtype] = None, 59 out: Optional[JaxArray] = None, 60 ) -> JaxArray: 61 if window_length < 2: 62 return jnp.ones([window_length], dtype=dtype) 63 if periodic is False: 64 return jnp.kaiser(M=window_length, beta=beta).astype(dtype) 65 else: 66 return jnp.kaiser(M=window_length + 1, beta=beta)[:-1].astype(dtype) 67 68 69 def tril_indices( 70 n_rows: int, 71 n_cols: Optional[int] = None, 72 k: int = 0, 73 /, 74 *, 75 device: jaxlib.xla_extension.Device, 76 ) -> Tuple[JaxArray, ...]: 77 return jnp.tril_indices(n=n_rows, k=k, m=n_cols) 78 79 80 def unsorted_segment_min( 81 data: JaxArray, 82 segment_ids: JaxArray, 83 num_segments: int, 84 ) -> JaxArray: 85 # added this check to keep the same behaviour as tensorflow 86 ivy.utils.assertions.check_unsorted_segment_min_valid_params( 87 data, segment_ids, num_segments 88 ) 89 return jax.ops.segment_min(data, segment_ids, num_segments) 90 91 92 def unsorted_segment_sum( 93 data: JaxArray, 94 segment_ids: JaxArray, 95 num_segments: int, 96 ) -> JaxArray: 97 # Used the same check which is used for unsorted_segment_min as 98 # the check should be same 99 # Might require to change the assertion function name to 100 # check_unsorted_segment_valid_params 101 ivy.utils.assertions.check_unsorted_segment_min_valid_params( 102 data, segment_ids, num_segments 103 ) 104 return jax.ops.segment_sum(data, segment_ids, num_segments) 105 106 107 def blackman_window( 108 size: int, 109 /, 110 *, 111 periodic: bool = True, 112 dtype: Optional[jnp.dtype] = None, 113 out: Optional[JaxArray] = None, 114 ) -> JaxArray: 115 if size < 2: 116 return jnp.ones([size], dtype=dtype) 117 if periodic: 118 count = jnp.arange(size) / size 119 else: 120 count = jnp.linspace(start=0, stop=size, num=size) 121 return (0.42 - 0.5 * jnp.cos(2 * jnp.pi * count)) + ( 122 0.08 * jnp.cos(2 * jnp.pi * 2 * count) 123 ) 124 125 126 def trilu( 127 x: JaxArray, /, *, k: int = 0, upper: bool = True, out: Optional[JaxArray] = None 128 ) -> JaxArray: 129 if upper: 130 return jnp.triu(x, k) 131 return jnp.tril(x, k) 132 133 134 def mel_weight_matrix( 135 num_mel_bins: int, 136 dft_length: int, 137 sample_rate: int, 138 lower_edge_hertz: float = 0.0, 139 upper_edge_hertz: float = 3000.0, 140 ): 141 lower_edge_hertz = jnp.array(lower_edge_hertz) 142 upper_edge_hertz = jnp.array(upper_edge_hertz) 143 zero = jnp.array(0.0) 144 hz_to_mel = lambda f: 2595 * jnp.log10(1 + f / 700) 145 nyquist_hz = sample_rate / 2 146 linear_freqs = jnp.linspace(0, nyquist_hz, dft_length, dtype=jnp.float32)[1:] 147 spec_bin_mels = hz_to_mel(linear_freqs)[..., None] 148 mel_edges = jnp.linspace( 149 hz_to_mel(lower_edge_hertz), 150 hz_to_mel(upper_edge_hertz), 151 num_mel_bins + 2, 152 dtype=jnp.float32, 153 ) 154 mel_edges = jnp.stack([mel_edges[i : i + 3] for i in range(num_mel_bins)]) 155 lower_edge_mel, center_mel, upper_edge_mel = [ 156 t.reshape((1, num_mel_bins)) for t in jnp.split(mel_edges, 3, axis=1) 157 ] 158 lower_slopes = (spec_bin_mels - lower_edge_mel) / (center_mel - lower_edge_mel) 159 upper_slopes = (upper_edge_mel - spec_bin_mels) / (upper_edge_mel - center_mel) 160 mel_weights = jnp.maximum(zero, jnp.minimum(lower_slopes, upper_slopes)) 161 return jnp.pad(mel_weights, [[1, 0], [0, 0]]) 162 ``` Path: `ivy/functional/backends/tensorflow/experimental/creation.py` Content: ``` 1 # global 2 3 from typing import Union, Optional, Tuple 4 import tensorflow as tf 5 6 # local 7 from ivy.func_wrapper import with_unsupported_device_and_dtypes, with_unsupported_dtypes 8 from .. import backend_version 9 10 11 # Array API Standard # 12 # -------------------# 13 14 15 @with_unsupported_device_and_dtypes( 16 {"2.13.0 and below": {"cpu": ("bfloat16",)}}, 17 backend_version, 18 ) 19 def kaiser_window( 20 window_length: int, 21 periodic: bool = True, 22 beta: float = 12.0, 23 *, 24 dtype: Optional[tf.DType] = None, 25 out: Optional[Union[tf.Tensor, tf.Variable]] = None, 26 ) -> Union[tf.Tensor, tf.Variable]: 27 if window_length < 2: 28 return tf.ones([window_length], dtype=dtype) 29 if periodic is False: 30 return tf.signal.kaiser_window(window_length, beta, dtype=dtype) 31 else: 32 return tf.signal.kaiser_window(window_length + 1, beta, dtype=dtype)[:-1] 33 34 35 def kaiser_bessel_derived_window( 36 window_length: int, 37 beta: float = 12.0, 38 *, 39 dtype: Optional[tf.DType] = None, 40 out: Optional[Union[tf.Tensor, tf.Variable]] = None, 41 ) -> Union[tf.Tensor, tf.Variable]: 42 return tf.signal.kaiser_bessel_derived_window(window_length, beta, dtype) 43 44 45 def vorbis_window( 46 window_length: Union[tf.Tensor, tf.Variable], 47 *, 48 dtype: tf.DType = tf.dtypes.float32, 49 out: Optional[Union[tf.Tensor, tf.Variable]] = None, 50 ) -> Union[tf.Tensor, tf.Variable]: 51 return tf.signal.vorbis_window(window_length, dtype=dtype, name=None) 52 53 54 def hann_window( 55 size: int, 56 /, 57 *, 58 periodic: bool = True, 59 dtype: Optional[tf.DType] = None, 60 out: Optional[Union[tf.Tensor, tf.Variable]] = None, 61 ) -> Union[tf.Tensor, tf.Variable]: 62 if size < 2: 63 return tf.ones([size], dtype=dtype) 64 if periodic: 65 return tf.signal.hann_window(size + 1, periodic=False, dtype=dtype)[:-1] 66 else: 67 return tf.signal.hann_window(size, periodic=False, dtype=dtype) 68 69 70 def tril_indices( 71 n_rows: int, 72 n_cols: Optional[int] = None, 73 k: int = 0, 74 /, 75 *, 76 device: str, 77 ) -> Tuple[Union[tf.Tensor, tf.Variable], ...]: 78 n_cols = n_rows if n_cols is None else n_cols 79 80 if n_rows < 0 or n_cols < 0: 81 n_rows, n_cols = 0, 0 82 83 ret = [[], []] 84 85 for i in range(-min(k, 0), n_rows, 1): 86 for j in range(0, min(n_cols, k + i + 1), 1): 87 ret[0].append(i) 88 ret[1].append(j) 89 90 return tuple(tf.convert_to_tensor(ret, dtype=tf.int64)) 91 92 93 def unsorted_segment_min( 94 data: tf.Tensor, 95 segment_ids: tf.Tensor, 96 num_segments: Union[int, tf.Tensor], 97 ) -> tf.Tensor: 98 return tf.math.unsorted_segment_min(data, segment_ids, num_segments) 99 100 101 def blackman_window( 102 size: int, 103 /, 104 *, 105 periodic: bool = True, 106 dtype: Optional[tf.DType] = None, 107 out: Optional[Union[tf.Tensor, tf.Variable]] = None, 108 ) -> Union[tf.Tensor, tf.Variable]: 109 if size < 2: 110 return tf.ones([size], dtype=tf.result_type(size, 0.0)) 111 if periodic: 112 count = tf.arange(size) / size 113 else: 114 count = tf.linspace(start=0, stop=size, num=size) 115 116 return (0.42 - 0.5 * tf.cos(2 * tf.pi * count)) + ( 117 0.08 * tf.cos(2 * tf.pi * 2 * count) 118 ) 119 120 121 def unsorted_segment_sum( 122 data: tf.Tensor, 123 segment_ids: tf.Tensor, 124 num_segments: Union[int, tf.Tensor], 125 ) -> tf.Tensor: 126 return tf.math.unsorted_segment_sum(data, segment_ids, num_segments) 127 128 129 @with_unsupported_dtypes({"2.13.0 and below": ("bool",)}, backend_version) 130 def trilu( 131 x: Union[tf.Tensor, tf.Variable], 132 /, 133 *, 134 k: int = 0, 135 upper: bool = True, 136 out: Optional[Union[tf.Tensor, tf.Variable]] = None, 137 ) -> Union[tf.Tensor, tf.Variable]: 138 if upper: 139 return tf.experimental.numpy.triu(x, k) 140 return tf.experimental.numpy.tril(x, k) 141 142 143 def mel_weight_matrix( 144 num_mel_bins: int, 145 dft_length: int, 146 sample_rate: int, 147 lower_edge_hertz: float = 125.0, 148 upper_edge_hertz: float = 3000.0, 149 ): 150 return tf.signal.linear_to_mel_weight_matrix( 151 num_mel_bins, 152 dft_length, 153 sample_rate, 154 lower_edge_hertz=lower_edge_hertz, 155 upper_edge_hertz=upper_edge_hertz, 156 ) 157 ``` Path: `ivy/functional/frontends/torch/spectral_ops.py` Content: ``` 1 import ivy 2 from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back 3 4 5 @to_ivy_arrays_and_back 6 def bartlett_window( 7 window_length, 8 periodic=True, 9 *, 10 dtype=None, 11 layout=None, 12 device=None, 13 requires_grad=False 14 ): 15 # this implementation is based on scipy.signal.windows.bartlett 16 # https://github.com/scipy/scipy/blob/v1.11.2/scipy/signal/windows/_windows.py#L625-L721 17 if int(window_length) != window_length or window_length < 0: 18 raise ValueError("Window length must be a non-negative integer") 19 elif window_length == 1: 20 return ivy.ones(window_length) 21 else: 22 N = window_length + 1 if periodic else window_length 23 24 res = ivy.arange(0, N, dtype=dtype) 25 res = ivy.where( 26 ivy.less_equal(res, (N - 1) / 2.0), 27 2.0 * res / (N - 1), 28 2.0 - 2.0 * res / (N - 1), 29 ) 30 31 return res[:-1] if periodic else res 32 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ivy/functional/backends/jax/experimental/creation.py b/ivy/functional/backends/jax/experimental/creation.py --- a/ivy/functional/backends/jax/experimental/creation.py +++ b/ivy/functional/backends/jax/experimental/creation.py @@ -118,9 +118,10 @@ count = jnp.arange(size) / size else: count = jnp.linspace(start=0, stop=size, num=size) - return (0.42 - 0.5 * jnp.cos(2 * jnp.pi * count)) + ( - 0.08 * jnp.cos(2 * jnp.pi * 2 * count) - ) + return ( + (0.42 - 0.5 * jnp.cos(2 * jnp.pi * count)) + + (0.08 * jnp.cos(2 * jnp.pi * 2 * count)) + ).astype(dtype) def trilu( diff --git a/ivy/functional/backends/tensorflow/experimental/creation.py b/ivy/functional/backends/tensorflow/experimental/creation.py --- a/ivy/functional/backends/tensorflow/experimental/creation.py +++ b/ivy/functional/backends/tensorflow/experimental/creation.py @@ -107,14 +107,18 @@ out: Optional[Union[tf.Tensor, tf.Variable]] = None, ) -> Union[tf.Tensor, tf.Variable]: if size < 2: - return tf.ones([size], dtype=tf.result_type(size, 0.0)) + return tf.cast( + tf.ones([size], dtype=tf.experimental.numpy.result_type(size, 0.0)), + dtype=dtype, + ) if periodic: - count = tf.arange(size) / size + count = tf.experimental.numpy.arange(size) / size else: count = tf.linspace(start=0, stop=size, num=size) - - return (0.42 - 0.5 * tf.cos(2 * tf.pi * count)) + ( - 0.08 * tf.cos(2 * tf.pi * 2 * count) + return tf.cast( + (0.42 - 0.5 * tf.cos(2 * tf.experimental.numpy.pi * count)) + + (0.08 * tf.cos(2 * tf.experimental.numpy.pi * 2 * count)), + dtype=dtype, ) diff --git a/ivy/functional/frontends/torch/spectral_ops.py b/ivy/functional/frontends/torch/spectral_ops.py --- a/ivy/functional/frontends/torch/spectral_ops.py +++ b/ivy/functional/frontends/torch/spectral_ops.py @@ -1,5 +1,6 @@ import ivy from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back +from ivy.func_wrapper import with_supported_dtypes @to_ivy_arrays_and_back @@ -29,3 +30,17 @@ ) return res[:-1] if periodic else res + + +@to_ivy_arrays_and_back +@with_supported_dtypes({"2.51.0 and below": ("float32", "float64")}, "torch") +def blackman_window( + window_length, + periodic=True, + *, + dtype=None, + layout=None, + device=None, + requires_grad=False +): + return ivy.blackman_window(window_length, periodic=periodic, dtype=dtype)
{"golden_diff": "diff --git a/ivy/functional/backends/jax/experimental/creation.py b/ivy/functional/backends/jax/experimental/creation.py\n--- a/ivy/functional/backends/jax/experimental/creation.py\n+++ b/ivy/functional/backends/jax/experimental/creation.py\n@@ -118,9 +118,10 @@\n count = jnp.arange(size) / size\n else:\n count = jnp.linspace(start=0, stop=size, num=size)\n- return (0.42 - 0.5 * jnp.cos(2 * jnp.pi * count)) + (\n- 0.08 * jnp.cos(2 * jnp.pi * 2 * count)\n- )\n+ return (\n+ (0.42 - 0.5 * jnp.cos(2 * jnp.pi * count))\n+ + (0.08 * jnp.cos(2 * jnp.pi * 2 * count))\n+ ).astype(dtype)\n \n \n def trilu(\ndiff --git a/ivy/functional/backends/tensorflow/experimental/creation.py b/ivy/functional/backends/tensorflow/experimental/creation.py\n--- a/ivy/functional/backends/tensorflow/experimental/creation.py\n+++ b/ivy/functional/backends/tensorflow/experimental/creation.py\n@@ -107,14 +107,18 @@\n out: Optional[Union[tf.Tensor, tf.Variable]] = None,\n ) -> Union[tf.Tensor, tf.Variable]:\n if size < 2:\n- return tf.ones([size], dtype=tf.result_type(size, 0.0))\n+ return tf.cast(\n+ tf.ones([size], dtype=tf.experimental.numpy.result_type(size, 0.0)),\n+ dtype=dtype,\n+ )\n if periodic:\n- count = tf.arange(size) / size\n+ count = tf.experimental.numpy.arange(size) / size\n else:\n count = tf.linspace(start=0, stop=size, num=size)\n-\n- return (0.42 - 0.5 * tf.cos(2 * tf.pi * count)) + (\n- 0.08 * tf.cos(2 * tf.pi * 2 * count)\n+ return tf.cast(\n+ (0.42 - 0.5 * tf.cos(2 * tf.experimental.numpy.pi * count))\n+ + (0.08 * tf.cos(2 * tf.experimental.numpy.pi * 2 * count)),\n+ dtype=dtype,\n )\n \n \ndiff --git a/ivy/functional/frontends/torch/spectral_ops.py b/ivy/functional/frontends/torch/spectral_ops.py\n--- a/ivy/functional/frontends/torch/spectral_ops.py\n+++ b/ivy/functional/frontends/torch/spectral_ops.py\n@@ -1,5 +1,6 @@\n import ivy\n from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back\n+from ivy.func_wrapper import with_supported_dtypes\n \n \n @to_ivy_arrays_and_back\n@@ -29,3 +30,17 @@\n )\n \n return res[:-1] if periodic else res\n+\n+\n+@to_ivy_arrays_and_back\n+@with_supported_dtypes({\"2.51.0 and below\": (\"float32\", \"float64\")}, \"torch\")\n+def blackman_window(\n+ window_length,\n+ periodic=True,\n+ *,\n+ dtype=None,\n+ layout=None,\n+ device=None,\n+ requires_grad=False\n+):\n+ return ivy.blackman_window(window_length, periodic=periodic, dtype=dtype)\n", "issue": "blackman_window\n\n", "before_files": [{"content": "# global\nfrom typing import Optional, Tuple\nimport math\nimport jax\nimport jax.numpy as jnp\nimport jaxlib.xla_extension\n\n# local\nfrom ivy.functional.backends.jax import JaxArray\nimport ivy\n\n# Array API Standard #\n# ------------------ #\n\n\ndef vorbis_window(\n window_length: JaxArray,\n *,\n dtype: jnp.dtype = jnp.float32,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n return jnp.array(\n [\n round(\n math.sin(\n (ivy.pi / 2) * (math.sin(ivy.pi * (i) / (window_length * 2)) ** 2)\n ),\n 8,\n )\n for i in range(1, window_length * 2)[0::2]\n ],\n dtype=dtype,\n )\n\n\ndef hann_window(\n size: int,\n /,\n *,\n periodic: bool = True,\n dtype: Optional[jnp.dtype] = None,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n if size < 2:\n return jnp.ones([size], dtype=dtype)\n if periodic:\n count = jnp.arange(size) / size\n else:\n count = jnp.linspace(start=0, stop=size, num=size)\n return (0.5 - 0.5 * jnp.cos(2 * jnp.pi * count)).astype(dtype)\n\n\ndef kaiser_window(\n window_length: int,\n periodic: bool = True,\n beta: float = 12.0,\n *,\n dtype: Optional[jnp.dtype] = None,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n if window_length < 2:\n return jnp.ones([window_length], dtype=dtype)\n if periodic is False:\n return jnp.kaiser(M=window_length, beta=beta).astype(dtype)\n else:\n return jnp.kaiser(M=window_length + 1, beta=beta)[:-1].astype(dtype)\n\n\ndef tril_indices(\n n_rows: int,\n n_cols: Optional[int] = None,\n k: int = 0,\n /,\n *,\n device: jaxlib.xla_extension.Device,\n) -> Tuple[JaxArray, ...]:\n return jnp.tril_indices(n=n_rows, k=k, m=n_cols)\n\n\ndef unsorted_segment_min(\n data: JaxArray,\n segment_ids: JaxArray,\n num_segments: int,\n) -> JaxArray:\n # added this check to keep the same behaviour as tensorflow\n ivy.utils.assertions.check_unsorted_segment_min_valid_params(\n data, segment_ids, num_segments\n )\n return jax.ops.segment_min(data, segment_ids, num_segments)\n\n\ndef unsorted_segment_sum(\n data: JaxArray,\n segment_ids: JaxArray,\n num_segments: int,\n) -> JaxArray:\n # Used the same check which is used for unsorted_segment_min as\n # the check should be same\n # Might require to change the assertion function name to\n # check_unsorted_segment_valid_params\n ivy.utils.assertions.check_unsorted_segment_min_valid_params(\n data, segment_ids, num_segments\n )\n return jax.ops.segment_sum(data, segment_ids, num_segments)\n\n\ndef blackman_window(\n size: int,\n /,\n *,\n periodic: bool = True,\n dtype: Optional[jnp.dtype] = None,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n if size < 2:\n return jnp.ones([size], dtype=dtype)\n if periodic:\n count = jnp.arange(size) / size\n else:\n count = jnp.linspace(start=0, stop=size, num=size)\n return (0.42 - 0.5 * jnp.cos(2 * jnp.pi * count)) + (\n 0.08 * jnp.cos(2 * jnp.pi * 2 * count)\n )\n\n\ndef trilu(\n x: JaxArray, /, *, k: int = 0, upper: bool = True, out: Optional[JaxArray] = None\n) -> JaxArray:\n if upper:\n return jnp.triu(x, k)\n return jnp.tril(x, k)\n\n\ndef mel_weight_matrix(\n num_mel_bins: int,\n dft_length: int,\n sample_rate: int,\n lower_edge_hertz: float = 0.0,\n upper_edge_hertz: float = 3000.0,\n):\n lower_edge_hertz = jnp.array(lower_edge_hertz)\n upper_edge_hertz = jnp.array(upper_edge_hertz)\n zero = jnp.array(0.0)\n hz_to_mel = lambda f: 2595 * jnp.log10(1 + f / 700)\n nyquist_hz = sample_rate / 2\n linear_freqs = jnp.linspace(0, nyquist_hz, dft_length, dtype=jnp.float32)[1:]\n spec_bin_mels = hz_to_mel(linear_freqs)[..., None]\n mel_edges = jnp.linspace(\n hz_to_mel(lower_edge_hertz),\n hz_to_mel(upper_edge_hertz),\n num_mel_bins + 2,\n dtype=jnp.float32,\n )\n mel_edges = jnp.stack([mel_edges[i : i + 3] for i in range(num_mel_bins)])\n lower_edge_mel, center_mel, upper_edge_mel = [\n t.reshape((1, num_mel_bins)) for t in jnp.split(mel_edges, 3, axis=1)\n ]\n lower_slopes = (spec_bin_mels - lower_edge_mel) / (center_mel - lower_edge_mel)\n upper_slopes = (upper_edge_mel - spec_bin_mels) / (upper_edge_mel - center_mel)\n mel_weights = jnp.maximum(zero, jnp.minimum(lower_slopes, upper_slopes))\n return jnp.pad(mel_weights, [[1, 0], [0, 0]])\n", "path": "ivy/functional/backends/jax/experimental/creation.py"}, {"content": "# global\n\nfrom typing import Union, Optional, Tuple\nimport tensorflow as tf\n\n# local\nfrom ivy.func_wrapper import with_unsupported_device_and_dtypes, with_unsupported_dtypes\nfrom .. import backend_version\n\n\n# Array API Standard #\n# -------------------#\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.13.0 and below\": {\"cpu\": (\"bfloat16\",)}},\n backend_version,\n)\ndef kaiser_window(\n window_length: int,\n periodic: bool = True,\n beta: float = 12.0,\n *,\n dtype: Optional[tf.DType] = None,\n out: Optional[Union[tf.Tensor, tf.Variable]] = None,\n) -> Union[tf.Tensor, tf.Variable]:\n if window_length < 2:\n return tf.ones([window_length], dtype=dtype)\n if periodic is False:\n return tf.signal.kaiser_window(window_length, beta, dtype=dtype)\n else:\n return tf.signal.kaiser_window(window_length + 1, beta, dtype=dtype)[:-1]\n\n\ndef kaiser_bessel_derived_window(\n window_length: int,\n beta: float = 12.0,\n *,\n dtype: Optional[tf.DType] = None,\n out: Optional[Union[tf.Tensor, tf.Variable]] = None,\n) -> Union[tf.Tensor, tf.Variable]:\n return tf.signal.kaiser_bessel_derived_window(window_length, beta, dtype)\n\n\ndef vorbis_window(\n window_length: Union[tf.Tensor, tf.Variable],\n *,\n dtype: tf.DType = tf.dtypes.float32,\n out: Optional[Union[tf.Tensor, tf.Variable]] = None,\n) -> Union[tf.Tensor, tf.Variable]:\n return tf.signal.vorbis_window(window_length, dtype=dtype, name=None)\n\n\ndef hann_window(\n size: int,\n /,\n *,\n periodic: bool = True,\n dtype: Optional[tf.DType] = None,\n out: Optional[Union[tf.Tensor, tf.Variable]] = None,\n) -> Union[tf.Tensor, tf.Variable]:\n if size < 2:\n return tf.ones([size], dtype=dtype)\n if periodic:\n return tf.signal.hann_window(size + 1, periodic=False, dtype=dtype)[:-1]\n else:\n return tf.signal.hann_window(size, periodic=False, dtype=dtype)\n\n\ndef tril_indices(\n n_rows: int,\n n_cols: Optional[int] = None,\n k: int = 0,\n /,\n *,\n device: str,\n) -> Tuple[Union[tf.Tensor, tf.Variable], ...]:\n n_cols = n_rows if n_cols is None else n_cols\n\n if n_rows < 0 or n_cols < 0:\n n_rows, n_cols = 0, 0\n\n ret = [[], []]\n\n for i in range(-min(k, 0), n_rows, 1):\n for j in range(0, min(n_cols, k + i + 1), 1):\n ret[0].append(i)\n ret[1].append(j)\n\n return tuple(tf.convert_to_tensor(ret, dtype=tf.int64))\n\n\ndef unsorted_segment_min(\n data: tf.Tensor,\n segment_ids: tf.Tensor,\n num_segments: Union[int, tf.Tensor],\n) -> tf.Tensor:\n return tf.math.unsorted_segment_min(data, segment_ids, num_segments)\n\n\ndef blackman_window(\n size: int,\n /,\n *,\n periodic: bool = True,\n dtype: Optional[tf.DType] = None,\n out: Optional[Union[tf.Tensor, tf.Variable]] = None,\n) -> Union[tf.Tensor, tf.Variable]:\n if size < 2:\n return tf.ones([size], dtype=tf.result_type(size, 0.0))\n if periodic:\n count = tf.arange(size) / size\n else:\n count = tf.linspace(start=0, stop=size, num=size)\n\n return (0.42 - 0.5 * tf.cos(2 * tf.pi * count)) + (\n 0.08 * tf.cos(2 * tf.pi * 2 * count)\n )\n\n\ndef unsorted_segment_sum(\n data: tf.Tensor,\n segment_ids: tf.Tensor,\n num_segments: Union[int, tf.Tensor],\n) -> tf.Tensor:\n return tf.math.unsorted_segment_sum(data, segment_ids, num_segments)\n\n\n@with_unsupported_dtypes({\"2.13.0 and below\": (\"bool\",)}, backend_version)\ndef trilu(\n x: Union[tf.Tensor, tf.Variable],\n /,\n *,\n k: int = 0,\n upper: bool = True,\n out: Optional[Union[tf.Tensor, tf.Variable]] = None,\n) -> Union[tf.Tensor, tf.Variable]:\n if upper:\n return tf.experimental.numpy.triu(x, k)\n return tf.experimental.numpy.tril(x, k)\n\n\ndef mel_weight_matrix(\n num_mel_bins: int,\n dft_length: int,\n sample_rate: int,\n lower_edge_hertz: float = 125.0,\n upper_edge_hertz: float = 3000.0,\n):\n return tf.signal.linear_to_mel_weight_matrix(\n num_mel_bins,\n dft_length,\n sample_rate,\n lower_edge_hertz=lower_edge_hertz,\n upper_edge_hertz=upper_edge_hertz,\n )\n", "path": "ivy/functional/backends/tensorflow/experimental/creation.py"}, {"content": "import ivy\nfrom ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back\n\n\n@to_ivy_arrays_and_back\ndef bartlett_window(\n window_length,\n periodic=True,\n *,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False\n):\n # this implementation is based on scipy.signal.windows.bartlett\n # https://github.com/scipy/scipy/blob/v1.11.2/scipy/signal/windows/_windows.py#L625-L721\n if int(window_length) != window_length or window_length < 0:\n raise ValueError(\"Window length must be a non-negative integer\")\n elif window_length == 1:\n return ivy.ones(window_length)\n else:\n N = window_length + 1 if periodic else window_length\n\n res = ivy.arange(0, N, dtype=dtype)\n res = ivy.where(\n ivy.less_equal(res, (N - 1) / 2.0),\n 2.0 * res / (N - 1),\n 2.0 - 2.0 * res / (N - 1),\n )\n\n return res[:-1] if periodic else res\n", "path": "ivy/functional/frontends/torch/spectral_ops.py"}], "after_files": [{"content": "# global\nfrom typing import Optional, Tuple\nimport math\nimport jax\nimport jax.numpy as jnp\nimport jaxlib.xla_extension\n\n# local\nfrom ivy.functional.backends.jax import JaxArray\nimport ivy\n\n# Array API Standard #\n# ------------------ #\n\n\ndef vorbis_window(\n window_length: JaxArray,\n *,\n dtype: jnp.dtype = jnp.float32,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n return jnp.array(\n [\n round(\n math.sin(\n (ivy.pi / 2) * (math.sin(ivy.pi * (i) / (window_length * 2)) ** 2)\n ),\n 8,\n )\n for i in range(1, window_length * 2)[0::2]\n ],\n dtype=dtype,\n )\n\n\ndef hann_window(\n size: int,\n /,\n *,\n periodic: bool = True,\n dtype: Optional[jnp.dtype] = None,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n if size < 2:\n return jnp.ones([size], dtype=dtype)\n if periodic:\n count = jnp.arange(size) / size\n else:\n count = jnp.linspace(start=0, stop=size, num=size)\n return (0.5 - 0.5 * jnp.cos(2 * jnp.pi * count)).astype(dtype)\n\n\ndef kaiser_window(\n window_length: int,\n periodic: bool = True,\n beta: float = 12.0,\n *,\n dtype: Optional[jnp.dtype] = None,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n if window_length < 2:\n return jnp.ones([window_length], dtype=dtype)\n if periodic is False:\n return jnp.kaiser(M=window_length, beta=beta).astype(dtype)\n else:\n return jnp.kaiser(M=window_length + 1, beta=beta)[:-1].astype(dtype)\n\n\ndef tril_indices(\n n_rows: int,\n n_cols: Optional[int] = None,\n k: int = 0,\n /,\n *,\n device: jaxlib.xla_extension.Device,\n) -> Tuple[JaxArray, ...]:\n return jnp.tril_indices(n=n_rows, k=k, m=n_cols)\n\n\ndef unsorted_segment_min(\n data: JaxArray,\n segment_ids: JaxArray,\n num_segments: int,\n) -> JaxArray:\n # added this check to keep the same behaviour as tensorflow\n ivy.utils.assertions.check_unsorted_segment_min_valid_params(\n data, segment_ids, num_segments\n )\n return jax.ops.segment_min(data, segment_ids, num_segments)\n\n\ndef unsorted_segment_sum(\n data: JaxArray,\n segment_ids: JaxArray,\n num_segments: int,\n) -> JaxArray:\n # Used the same check which is used for unsorted_segment_min as\n # the check should be same\n # Might require to change the assertion function name to\n # check_unsorted_segment_valid_params\n ivy.utils.assertions.check_unsorted_segment_min_valid_params(\n data, segment_ids, num_segments\n )\n return jax.ops.segment_sum(data, segment_ids, num_segments)\n\n\ndef blackman_window(\n size: int,\n /,\n *,\n periodic: bool = True,\n dtype: Optional[jnp.dtype] = None,\n out: Optional[JaxArray] = None,\n) -> JaxArray:\n if size < 2:\n return jnp.ones([size], dtype=dtype)\n if periodic:\n count = jnp.arange(size) / size\n else:\n count = jnp.linspace(start=0, stop=size, num=size)\n return (\n (0.42 - 0.5 * jnp.cos(2 * jnp.pi * count))\n + (0.08 * jnp.cos(2 * jnp.pi * 2 * count))\n ).astype(dtype)\n\n\ndef trilu(\n x: JaxArray, /, *, k: int = 0, upper: bool = True, out: Optional[JaxArray] = None\n) -> JaxArray:\n if upper:\n return jnp.triu(x, k)\n return jnp.tril(x, k)\n\n\ndef mel_weight_matrix(\n num_mel_bins: int,\n dft_length: int,\n sample_rate: int,\n lower_edge_hertz: float = 0.0,\n upper_edge_hertz: float = 3000.0,\n):\n lower_edge_hertz = jnp.array(lower_edge_hertz)\n upper_edge_hertz = jnp.array(upper_edge_hertz)\n zero = jnp.array(0.0)\n hz_to_mel = lambda f: 2595 * jnp.log10(1 + f / 700)\n nyquist_hz = sample_rate / 2\n linear_freqs = jnp.linspace(0, nyquist_hz, dft_length, dtype=jnp.float32)[1:]\n spec_bin_mels = hz_to_mel(linear_freqs)[..., None]\n mel_edges = jnp.linspace(\n hz_to_mel(lower_edge_hertz),\n hz_to_mel(upper_edge_hertz),\n num_mel_bins + 2,\n dtype=jnp.float32,\n )\n mel_edges = jnp.stack([mel_edges[i : i + 3] for i in range(num_mel_bins)])\n lower_edge_mel, center_mel, upper_edge_mel = [\n t.reshape((1, num_mel_bins)) for t in jnp.split(mel_edges, 3, axis=1)\n ]\n lower_slopes = (spec_bin_mels - lower_edge_mel) / (center_mel - lower_edge_mel)\n upper_slopes = (upper_edge_mel - spec_bin_mels) / (upper_edge_mel - center_mel)\n mel_weights = jnp.maximum(zero, jnp.minimum(lower_slopes, upper_slopes))\n return jnp.pad(mel_weights, [[1, 0], [0, 0]])\n", "path": "ivy/functional/backends/jax/experimental/creation.py"}, {"content": "# global\n\nfrom typing import Union, Optional, Tuple\nimport tensorflow as tf\n\n# local\nfrom ivy.func_wrapper import with_unsupported_device_and_dtypes, with_unsupported_dtypes\nfrom .. import backend_version\n\n\n# Array API Standard #\n# -------------------#\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.13.0 and below\": {\"cpu\": (\"bfloat16\",)}},\n backend_version,\n)\ndef kaiser_window(\n window_length: int,\n periodic: bool = True,\n beta: float = 12.0,\n *,\n dtype: Optional[tf.DType] = None,\n out: Optional[Union[tf.Tensor, tf.Variable]] = None,\n) -> Union[tf.Tensor, tf.Variable]:\n if window_length < 2:\n return tf.ones([window_length], dtype=dtype)\n if periodic is False:\n return tf.signal.kaiser_window(window_length, beta, dtype=dtype)\n else:\n return tf.signal.kaiser_window(window_length + 1, beta, dtype=dtype)[:-1]\n\n\ndef kaiser_bessel_derived_window(\n window_length: int,\n beta: float = 12.0,\n *,\n dtype: Optional[tf.DType] = None,\n out: Optional[Union[tf.Tensor, tf.Variable]] = None,\n) -> Union[tf.Tensor, tf.Variable]:\n return tf.signal.kaiser_bessel_derived_window(window_length, beta, dtype)\n\n\ndef vorbis_window(\n window_length: Union[tf.Tensor, tf.Variable],\n *,\n dtype: tf.DType = tf.dtypes.float32,\n out: Optional[Union[tf.Tensor, tf.Variable]] = None,\n) -> Union[tf.Tensor, tf.Variable]:\n return tf.signal.vorbis_window(window_length, dtype=dtype, name=None)\n\n\ndef hann_window(\n size: int,\n /,\n *,\n periodic: bool = True,\n dtype: Optional[tf.DType] = None,\n out: Optional[Union[tf.Tensor, tf.Variable]] = None,\n) -> Union[tf.Tensor, tf.Variable]:\n if size < 2:\n return tf.ones([size], dtype=dtype)\n if periodic:\n return tf.signal.hann_window(size + 1, periodic=False, dtype=dtype)[:-1]\n else:\n return tf.signal.hann_window(size, periodic=False, dtype=dtype)\n\n\ndef tril_indices(\n n_rows: int,\n n_cols: Optional[int] = None,\n k: int = 0,\n /,\n *,\n device: str,\n) -> Tuple[Union[tf.Tensor, tf.Variable], ...]:\n n_cols = n_rows if n_cols is None else n_cols\n\n if n_rows < 0 or n_cols < 0:\n n_rows, n_cols = 0, 0\n\n ret = [[], []]\n\n for i in range(-min(k, 0), n_rows, 1):\n for j in range(0, min(n_cols, k + i + 1), 1):\n ret[0].append(i)\n ret[1].append(j)\n\n return tuple(tf.convert_to_tensor(ret, dtype=tf.int64))\n\n\ndef unsorted_segment_min(\n data: tf.Tensor,\n segment_ids: tf.Tensor,\n num_segments: Union[int, tf.Tensor],\n) -> tf.Tensor:\n return tf.math.unsorted_segment_min(data, segment_ids, num_segments)\n\n\ndef blackman_window(\n size: int,\n /,\n *,\n periodic: bool = True,\n dtype: Optional[tf.DType] = None,\n out: Optional[Union[tf.Tensor, tf.Variable]] = None,\n) -> Union[tf.Tensor, tf.Variable]:\n if size < 2:\n return tf.cast(\n tf.ones([size], dtype=tf.experimental.numpy.result_type(size, 0.0)),\n dtype=dtype,\n )\n if periodic:\n count = tf.experimental.numpy.arange(size) / size\n else:\n count = tf.linspace(start=0, stop=size, num=size)\n return tf.cast(\n (0.42 - 0.5 * tf.cos(2 * tf.experimental.numpy.pi * count))\n + (0.08 * tf.cos(2 * tf.experimental.numpy.pi * 2 * count)),\n dtype=dtype,\n )\n\n\ndef unsorted_segment_sum(\n data: tf.Tensor,\n segment_ids: tf.Tensor,\n num_segments: Union[int, tf.Tensor],\n) -> tf.Tensor:\n return tf.math.unsorted_segment_sum(data, segment_ids, num_segments)\n\n\n@with_unsupported_dtypes({\"2.13.0 and below\": (\"bool\",)}, backend_version)\ndef trilu(\n x: Union[tf.Tensor, tf.Variable],\n /,\n *,\n k: int = 0,\n upper: bool = True,\n out: Optional[Union[tf.Tensor, tf.Variable]] = None,\n) -> Union[tf.Tensor, tf.Variable]:\n if upper:\n return tf.experimental.numpy.triu(x, k)\n return tf.experimental.numpy.tril(x, k)\n\n\ndef mel_weight_matrix(\n num_mel_bins: int,\n dft_length: int,\n sample_rate: int,\n lower_edge_hertz: float = 125.0,\n upper_edge_hertz: float = 3000.0,\n):\n return tf.signal.linear_to_mel_weight_matrix(\n num_mel_bins,\n dft_length,\n sample_rate,\n lower_edge_hertz=lower_edge_hertz,\n upper_edge_hertz=upper_edge_hertz,\n )\n", "path": "ivy/functional/backends/tensorflow/experimental/creation.py"}, {"content": "import ivy\nfrom ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back\nfrom ivy.func_wrapper import with_supported_dtypes\n\n\n@to_ivy_arrays_and_back\ndef bartlett_window(\n window_length,\n periodic=True,\n *,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False\n):\n # this implementation is based on scipy.signal.windows.bartlett\n # https://github.com/scipy/scipy/blob/v1.11.2/scipy/signal/windows/_windows.py#L625-L721\n if int(window_length) != window_length or window_length < 0:\n raise ValueError(\"Window length must be a non-negative integer\")\n elif window_length == 1:\n return ivy.ones(window_length)\n else:\n N = window_length + 1 if periodic else window_length\n\n res = ivy.arange(0, N, dtype=dtype)\n res = ivy.where(\n ivy.less_equal(res, (N - 1) / 2.0),\n 2.0 * res / (N - 1),\n 2.0 - 2.0 * res / (N - 1),\n )\n\n return res[:-1] if periodic else res\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.51.0 and below\": (\"float32\", \"float64\")}, \"torch\")\ndef blackman_window(\n window_length,\n periodic=True,\n *,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False\n):\n return ivy.blackman_window(window_length, periodic=periodic, dtype=dtype)\n", "path": "ivy/functional/frontends/torch/spectral_ops.py"}]}
3,945
800
gh_patches_debug_24915
rasdani/github-patches
git_diff
saleor__saleor-3848
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- GraphQL error: Expected a value of type "WeightUnitsEnum" but received: kg ### Steps to reproduce the problem 1. Change the default language in the storefront to Portuguese. 2. Go to Dashboard 2.0. The homepage runs a query which fetches `defaultWeightUnit` field. This GraphQL request returns this error: `GraphQL error: Expected a value of type "WeightUnitsEnum" but received: kg` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `saleor/core/weight.py` Content: ``` 1 """In Saleor we are using 'weight' instead of a 'mass'. 2 3 For those of us who are earth-bound, weight is what we usually experience. 4 Mass is a theoretical construct. 5 Unless we are dealing with inertia and momentum, we are encountering 6 the attractive force between ourselves and the earth, 7 the isolated effects of mass alone being a little more esoteric. 8 9 So even though mass is more fundamental, most people think 10 in terms of weight. 11 12 In the end, it does not really matter unless you travel between 13 different planets. 14 """ 15 from decimal import Decimal 16 from enum import Enum 17 18 from django import forms 19 from django.contrib.sites.models import Site 20 from django.core.validators import MinValueValidator 21 from django.template.loader import render_to_string 22 from django.utils.translation import pgettext_lazy 23 from measurement.measures import Weight 24 25 26 class WeightUnits: 27 KILOGRAM = 'kg' 28 POUND = 'lb' 29 OUNCE = 'oz' 30 GRAM = 'g' 31 32 CHOICES = [ 33 (KILOGRAM, pgettext_lazy('Kilogram weight unit symbol', 'kg')), 34 (POUND, pgettext_lazy('Pound weight unit symbol', 'lb')), 35 (OUNCE, pgettext_lazy('Ounce weight unit symbol', 'oz')), 36 (GRAM, pgettext_lazy('Gram weight unit symbol', 'g'))] 37 38 39 WeightUnitsEnum = Enum( 40 'WeightUnitsEnum', 41 {unit: unit for unit in WeightUnits.CHOICES}) 42 43 44 def zero_weight(): 45 """Represent the zero weight value.""" 46 return Weight(kg=0) 47 48 49 def convert_weight(weight, unit): 50 # Weight amount from the Weight instance can be retrived in serveral units 51 # via its properties. eg. Weight(lb=10).kg 52 converted_weight = getattr(weight, unit) 53 return Weight(**{unit: converted_weight}) 54 55 56 def get_default_weight_unit(): 57 site = Site.objects.get_current() 58 return site.settings.default_weight_unit 59 60 61 class WeightInput(forms.TextInput): 62 template = 'dashboard/shipping/weight_widget.html' 63 input_type = 'number' 64 65 def format_value(self, value): 66 if isinstance(value, Weight): 67 unit = get_default_weight_unit() 68 if value.unit != unit: 69 value = convert_weight(value, unit) 70 return value.value 71 return value 72 73 def render(self, name, value, attrs=None, renderer=None): 74 widget = super().render(name, value, attrs=attrs, renderer=renderer) 75 unit = get_default_weight_unit() 76 translated_unit = dict(WeightUnits.CHOICES)[unit] 77 return render_to_string( 78 self.template, 79 {'widget': widget, 'value': value, 'unit': translated_unit}) 80 81 82 class WeightField(forms.FloatField): 83 def __init__(self, *args, widget=WeightInput, min_value=0, **kwargs): 84 if isinstance(widget, type): 85 widget = widget(attrs={'type': 'number', 'step': 'any'}) 86 super().__init__(*args, widget=widget, **kwargs) 87 if min_value is not None: 88 self.validators.append(MinValueValidator(min_value)) 89 90 def to_python(self, value): 91 value = super().to_python(value) 92 if value is None: 93 return value 94 unit = get_default_weight_unit() 95 return Weight(**{unit: value}) 96 97 def validate(self, weight): 98 if weight is None or weight in self.empty_values: 99 super().validate(weight) 100 else: 101 unit = get_default_weight_unit() 102 if not isinstance(weight, Weight): 103 raise Exception( 104 '%r is not a valid weight.' % (weight,)) 105 if weight.unit != unit: 106 raise forms.ValidationError( 107 'Invalid unit: %r (expected %r).' % ( 108 weight.unit, unit)) 109 super().validate(weight.value) 110 111 def clean(self, value): 112 value = value_to_be_validated = self.to_python(value) 113 self.validate(value_to_be_validated) 114 if isinstance(value, Weight): 115 value_to_be_validated = Decimal(value.value) 116 # default decimal validators can be used for Weight's value only 117 self.run_validators(value_to_be_validated) 118 return value 119 ``` Path: `saleor/graphql/core/enums.py` Content: ``` 1 import graphene 2 3 from ...core import TaxRateType as CoreTaxRateType, weight 4 from ...core.permissions import MODELS_PERMISSIONS 5 from .utils import str_to_enum 6 7 8 class ReportingPeriod(graphene.Enum): 9 TODAY = 'TODAY' 10 THIS_MONTH = 'THIS_MONTH' 11 12 13 TaxRateType = graphene.Enum( 14 'TaxRateType', 15 [(str_to_enum(rate[0]), rate[0]) for rate in CoreTaxRateType.CHOICES]) 16 17 18 PermissionEnum = graphene.Enum( 19 'PermissionEnum', [ 20 (str_to_enum(codename.split('.')[1]), codename) 21 for codename in MODELS_PERMISSIONS]) 22 23 24 WeightUnitsEnum = graphene.Enum.from_enum(weight.WeightUnitsEnum) 25 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/saleor/core/weight.py b/saleor/core/weight.py --- a/saleor/core/weight.py +++ b/saleor/core/weight.py @@ -13,7 +13,6 @@ different planets. """ from decimal import Decimal -from enum import Enum from django import forms from django.contrib.sites.models import Site @@ -36,11 +35,6 @@ (GRAM, pgettext_lazy('Gram weight unit symbol', 'g'))] -WeightUnitsEnum = Enum( - 'WeightUnitsEnum', - {unit: unit for unit in WeightUnits.CHOICES}) - - def zero_weight(): """Represent the zero weight value.""" return Weight(kg=0) diff --git a/saleor/graphql/core/enums.py b/saleor/graphql/core/enums.py --- a/saleor/graphql/core/enums.py +++ b/saleor/graphql/core/enums.py @@ -1,6 +1,7 @@ import graphene -from ...core import TaxRateType as CoreTaxRateType, weight +from ...core import TaxRateType as CoreTaxRateType +from ...core.weight import WeightUnits from ...core.permissions import MODELS_PERMISSIONS from .utils import str_to_enum @@ -21,4 +22,6 @@ for codename in MODELS_PERMISSIONS]) -WeightUnitsEnum = graphene.Enum.from_enum(weight.WeightUnitsEnum) +WeightUnitsEnum = graphene.Enum( + 'WeightUnitsEnum', + [(str_to_enum(unit[0]), unit[0]) for unit in WeightUnits.CHOICES])
{"golden_diff": "diff --git a/saleor/core/weight.py b/saleor/core/weight.py\n--- a/saleor/core/weight.py\n+++ b/saleor/core/weight.py\n@@ -13,7 +13,6 @@\n different planets.\n \"\"\"\n from decimal import Decimal\n-from enum import Enum\n \n from django import forms\n from django.contrib.sites.models import Site\n@@ -36,11 +35,6 @@\n (GRAM, pgettext_lazy('Gram weight unit symbol', 'g'))]\n \n \n-WeightUnitsEnum = Enum(\n- 'WeightUnitsEnum',\n- {unit: unit for unit in WeightUnits.CHOICES})\n-\n-\n def zero_weight():\n \"\"\"Represent the zero weight value.\"\"\"\n return Weight(kg=0)\ndiff --git a/saleor/graphql/core/enums.py b/saleor/graphql/core/enums.py\n--- a/saleor/graphql/core/enums.py\n+++ b/saleor/graphql/core/enums.py\n@@ -1,6 +1,7 @@\n import graphene\n \n-from ...core import TaxRateType as CoreTaxRateType, weight\n+from ...core import TaxRateType as CoreTaxRateType\n+from ...core.weight import WeightUnits\n from ...core.permissions import MODELS_PERMISSIONS\n from .utils import str_to_enum\n \n@@ -21,4 +22,6 @@\n for codename in MODELS_PERMISSIONS])\n \n \n-WeightUnitsEnum = graphene.Enum.from_enum(weight.WeightUnitsEnum)\n+WeightUnitsEnum = graphene.Enum(\n+ 'WeightUnitsEnum',\n+ [(str_to_enum(unit[0]), unit[0]) for unit in WeightUnits.CHOICES])\n", "issue": "GraphQL error: Expected a value of type \"WeightUnitsEnum\" but received: kg\n### Steps to reproduce the problem\r\n1. Change the default language in the storefront to Portuguese.\r\n2. Go to Dashboard 2.0. The homepage runs a query which fetches `defaultWeightUnit` field.\r\nThis GraphQL request returns this error: `GraphQL error: Expected a value of type \"WeightUnitsEnum\" but received: kg`\r\n\n", "before_files": [{"content": "\"\"\"In Saleor we are using 'weight' instead of a 'mass'.\n\nFor those of us who are earth-bound, weight is what we usually experience.\nMass is a theoretical construct.\nUnless we are dealing with inertia and momentum, we are encountering\nthe attractive force between ourselves and the earth,\nthe isolated effects of mass alone being a little more esoteric.\n\nSo even though mass is more fundamental, most people think\nin terms of weight.\n\nIn the end, it does not really matter unless you travel between\ndifferent planets.\n\"\"\"\nfrom decimal import Decimal\nfrom enum import Enum\n\nfrom django import forms\nfrom django.contrib.sites.models import Site\nfrom django.core.validators import MinValueValidator\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import pgettext_lazy\nfrom measurement.measures import Weight\n\n\nclass WeightUnits:\n KILOGRAM = 'kg'\n POUND = 'lb'\n OUNCE = 'oz'\n GRAM = 'g'\n\n CHOICES = [\n (KILOGRAM, pgettext_lazy('Kilogram weight unit symbol', 'kg')),\n (POUND, pgettext_lazy('Pound weight unit symbol', 'lb')),\n (OUNCE, pgettext_lazy('Ounce weight unit symbol', 'oz')),\n (GRAM, pgettext_lazy('Gram weight unit symbol', 'g'))]\n\n\nWeightUnitsEnum = Enum(\n 'WeightUnitsEnum',\n {unit: unit for unit in WeightUnits.CHOICES})\n\n\ndef zero_weight():\n \"\"\"Represent the zero weight value.\"\"\"\n return Weight(kg=0)\n\n\ndef convert_weight(weight, unit):\n # Weight amount from the Weight instance can be retrived in serveral units\n # via its properties. eg. Weight(lb=10).kg\n converted_weight = getattr(weight, unit)\n return Weight(**{unit: converted_weight})\n\n\ndef get_default_weight_unit():\n site = Site.objects.get_current()\n return site.settings.default_weight_unit\n\n\nclass WeightInput(forms.TextInput):\n template = 'dashboard/shipping/weight_widget.html'\n input_type = 'number'\n\n def format_value(self, value):\n if isinstance(value, Weight):\n unit = get_default_weight_unit()\n if value.unit != unit:\n value = convert_weight(value, unit)\n return value.value\n return value\n\n def render(self, name, value, attrs=None, renderer=None):\n widget = super().render(name, value, attrs=attrs, renderer=renderer)\n unit = get_default_weight_unit()\n translated_unit = dict(WeightUnits.CHOICES)[unit]\n return render_to_string(\n self.template,\n {'widget': widget, 'value': value, 'unit': translated_unit})\n\n\nclass WeightField(forms.FloatField):\n def __init__(self, *args, widget=WeightInput, min_value=0, **kwargs):\n if isinstance(widget, type):\n widget = widget(attrs={'type': 'number', 'step': 'any'})\n super().__init__(*args, widget=widget, **kwargs)\n if min_value is not None:\n self.validators.append(MinValueValidator(min_value))\n\n def to_python(self, value):\n value = super().to_python(value)\n if value is None:\n return value\n unit = get_default_weight_unit()\n return Weight(**{unit: value})\n\n def validate(self, weight):\n if weight is None or weight in self.empty_values:\n super().validate(weight)\n else:\n unit = get_default_weight_unit()\n if not isinstance(weight, Weight):\n raise Exception(\n '%r is not a valid weight.' % (weight,))\n if weight.unit != unit:\n raise forms.ValidationError(\n 'Invalid unit: %r (expected %r).' % (\n weight.unit, unit))\n super().validate(weight.value)\n\n def clean(self, value):\n value = value_to_be_validated = self.to_python(value)\n self.validate(value_to_be_validated)\n if isinstance(value, Weight):\n value_to_be_validated = Decimal(value.value)\n # default decimal validators can be used for Weight's value only\n self.run_validators(value_to_be_validated)\n return value\n", "path": "saleor/core/weight.py"}, {"content": "import graphene\n\nfrom ...core import TaxRateType as CoreTaxRateType, weight\nfrom ...core.permissions import MODELS_PERMISSIONS\nfrom .utils import str_to_enum\n\n\nclass ReportingPeriod(graphene.Enum):\n TODAY = 'TODAY'\n THIS_MONTH = 'THIS_MONTH'\n\n\nTaxRateType = graphene.Enum(\n 'TaxRateType',\n [(str_to_enum(rate[0]), rate[0]) for rate in CoreTaxRateType.CHOICES])\n\n\nPermissionEnum = graphene.Enum(\n 'PermissionEnum', [\n (str_to_enum(codename.split('.')[1]), codename)\n for codename in MODELS_PERMISSIONS])\n\n\nWeightUnitsEnum = graphene.Enum.from_enum(weight.WeightUnitsEnum)\n", "path": "saleor/graphql/core/enums.py"}], "after_files": [{"content": "\"\"\"In Saleor we are using 'weight' instead of a 'mass'.\n\nFor those of us who are earth-bound, weight is what we usually experience.\nMass is a theoretical construct.\nUnless we are dealing with inertia and momentum, we are encountering\nthe attractive force between ourselves and the earth,\nthe isolated effects of mass alone being a little more esoteric.\n\nSo even though mass is more fundamental, most people think\nin terms of weight.\n\nIn the end, it does not really matter unless you travel between\ndifferent planets.\n\"\"\"\nfrom decimal import Decimal\n\nfrom django import forms\nfrom django.contrib.sites.models import Site\nfrom django.core.validators import MinValueValidator\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import pgettext_lazy\nfrom measurement.measures import Weight\n\n\nclass WeightUnits:\n KILOGRAM = 'kg'\n POUND = 'lb'\n OUNCE = 'oz'\n GRAM = 'g'\n\n CHOICES = [\n (KILOGRAM, pgettext_lazy('Kilogram weight unit symbol', 'kg')),\n (POUND, pgettext_lazy('Pound weight unit symbol', 'lb')),\n (OUNCE, pgettext_lazy('Ounce weight unit symbol', 'oz')),\n (GRAM, pgettext_lazy('Gram weight unit symbol', 'g'))]\n\n\ndef zero_weight():\n \"\"\"Represent the zero weight value.\"\"\"\n return Weight(kg=0)\n\n\ndef convert_weight(weight, unit):\n # Weight amount from the Weight instance can be retrived in serveral units\n # via its properties. eg. Weight(lb=10).kg\n converted_weight = getattr(weight, unit)\n return Weight(**{unit: converted_weight})\n\n\ndef get_default_weight_unit():\n site = Site.objects.get_current()\n return site.settings.default_weight_unit\n\n\nclass WeightInput(forms.TextInput):\n template = 'dashboard/shipping/weight_widget.html'\n input_type = 'number'\n\n def format_value(self, value):\n if isinstance(value, Weight):\n unit = get_default_weight_unit()\n if value.unit != unit:\n value = convert_weight(value, unit)\n return value.value\n return value\n\n def render(self, name, value, attrs=None, renderer=None):\n widget = super().render(name, value, attrs=attrs, renderer=renderer)\n unit = get_default_weight_unit()\n translated_unit = dict(WeightUnits.CHOICES)[unit]\n return render_to_string(\n self.template,\n {'widget': widget, 'value': value, 'unit': translated_unit})\n\n\nclass WeightField(forms.FloatField):\n def __init__(self, *args, widget=WeightInput, min_value=0, **kwargs):\n if isinstance(widget, type):\n widget = widget(attrs={'type': 'number', 'step': 'any'})\n super().__init__(*args, widget=widget, **kwargs)\n if min_value is not None:\n self.validators.append(MinValueValidator(min_value))\n\n def to_python(self, value):\n value = super().to_python(value)\n if value is None:\n return value\n unit = get_default_weight_unit()\n return Weight(**{unit: value})\n\n def validate(self, weight):\n if weight is None or weight in self.empty_values:\n super().validate(weight)\n else:\n unit = get_default_weight_unit()\n if not isinstance(weight, Weight):\n raise Exception(\n '%r is not a valid weight.' % (weight,))\n if weight.unit != unit:\n raise forms.ValidationError(\n 'Invalid unit: %r (expected %r).' % (\n weight.unit, unit))\n super().validate(weight.value)\n\n def clean(self, value):\n value = value_to_be_validated = self.to_python(value)\n self.validate(value_to_be_validated)\n if isinstance(value, Weight):\n value_to_be_validated = Decimal(value.value)\n # default decimal validators can be used for Weight's value only\n self.run_validators(value_to_be_validated)\n return value\n", "path": "saleor/core/weight.py"}, {"content": "import graphene\n\nfrom ...core import TaxRateType as CoreTaxRateType\nfrom ...core.weight import WeightUnits\nfrom ...core.permissions import MODELS_PERMISSIONS\nfrom .utils import str_to_enum\n\n\nclass ReportingPeriod(graphene.Enum):\n TODAY = 'TODAY'\n THIS_MONTH = 'THIS_MONTH'\n\n\nTaxRateType = graphene.Enum(\n 'TaxRateType',\n [(str_to_enum(rate[0]), rate[0]) for rate in CoreTaxRateType.CHOICES])\n\n\nPermissionEnum = graphene.Enum(\n 'PermissionEnum', [\n (str_to_enum(codename.split('.')[1]), codename)\n for codename in MODELS_PERMISSIONS])\n\n\nWeightUnitsEnum = graphene.Enum(\n 'WeightUnitsEnum',\n [(str_to_enum(unit[0]), unit[0]) for unit in WeightUnits.CHOICES])\n", "path": "saleor/graphql/core/enums.py"}]}
1,705
350
gh_patches_debug_28744
rasdani/github-patches
git_diff
ethereum__web3.py-1109
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Connection to ropsten websocket over infura hang. * Version: 4.4.1 * Python: 3.6 * OS: osx ### What was wrong? I wasn't able to connect to infura's ropsten websocket. The process hung when I wanted to create the filter object on pending transactions. It worked for both rinkeby's and mainnet's but just not for ropsten network. I was wondering if anyone is having the same issue. Given that I was able to connect to two other networks it makes it less suspicious that the problem is caused by the web3.py Below is how I instantiate the filter by following https://web3py.readthedocs.io/en/stable/filters.html#web3.utils.filters.TransactionFilter ```python In [23]: web3 = Web3(Web3.WebsocketProvider('wss://rinkeby.infura.io/_ws')) In [24]: new_transaction_filter = web3.eth.filter('pending') In [25]: new_transaction_filter.get_new_entries() Out[25]: [HexBytes('0x4945f39cbeedd911eae93577c75d763d364c37ae01dcb7fb31ff9421a2a92b71'), HexBytes('0xae0b88b85163d3f0a918236cbf3ec93ccb589c6ad327e937f69b9591680fbf61'), ``` Process hung on line 27 for a long time. I interrupted. The stacktrace is below. ```python In [26]: web3 = Web3(Web3.WebsocketProvider('wss://ropsten.infura.io/ws')) In [27]: new_transaction_filter = web3.eth.filter('pending') ^C--------------------------------------------------------------------------- KeyboardInterrupt Traceback (most recent call last) <ipython-input-3-60afc0d44840> in <module>() ----> 1 new_transaction_filter = web3.eth.filter('pending') ~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/web3/eth.py in filter(self, filter_params, filter_id) 319 elif filter_params == "pending": 320 filter_id = self.web3.manager.request_blocking( --> 321 "eth_newPendingTransactionFilter", [], 322 ) 323 return TransactionFilter(self.web3, filter_id) ~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/web3/manager.py in request_blocking(self, method, params) 105 Make a synchronous request using the provider 106 """ --> 107 response = self._make_request(method, params) 108 109 if "error" in response: ~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/web3/manager.py in _make_request(self, method, params) 88 self.logger.debug("Making request. Method: %s", method) 89 try: ---> 90 return request_func(method, params) 91 except CannotHandleRequest: 92 continue ~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/web3/middleware/gas_price_strategy.py in middleware(method, params) 16 transaction = assoc(transaction, 'gasPrice', generated_gas_price) 17 return make_request(method, [transaction]) ---> 18 return make_request(method, params) 19 return middleware ~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/cytoolz/functoolz.pyx in cytoolz.functoolz.curry.__call__() ~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/web3/middleware/formatting.py in apply_formatters(method, params, make_request, request_formatters, result_formatters, error_formatters) 48 response = make_request(method, formatted_params) 49 else: ---> 50 response = make_request(method, params) 51 52 if 'result' in response and method in result_formatters: ~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/web3/middleware/attrdict.py in middleware(method, params) 16 """ 17 def middleware(method, params): ---> 18 response = make_request(method, params) 19 20 if 'result' in response: ~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/cytoolz/functoolz.pyx in cytoolz.functoolz.curry.__call__() ~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/web3/middleware/formatting.py in apply_formatters(method, params, make_request, request_formatters, result_formatters, error_formatters) 48 response = make_request(method, formatted_params) 49 else: ---> 50 response = make_request(method, params) 51 52 if 'result' in response and method in result_formatters: ~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/web3/middleware/normalize_errors.py in middleware(method, params) 7 def normalize_errors_middleware(make_request, web3): 8 def middleware(method, params): ----> 9 result = make_request(method, params) 10 11 # As of v1.8, Geth returns errors when you request a ~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/cytoolz/functoolz.pyx in cytoolz.functoolz.curry.__call__() ~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/web3/middleware/formatting.py in apply_formatters(method, params, make_request, request_formatters, result_formatters, error_formatters) 48 response = make_request(method, formatted_params) 49 else: ---> 50 response = make_request(method, params) 51 52 if 'result' in response and method in result_formatters: ~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/cytoolz/functoolz.pyx in cytoolz.functoolz.curry.__call__() ~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/web3/middleware/formatting.py in apply_formatters(method, params, make_request, request_formatters, result_formatters, error_formatters) 48 response = make_request(method, formatted_params) 49 else: ---> 50 response = make_request(method, params) 51 52 if 'result' in response and method in result_formatters: ~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/web3/providers/websocket.py in make_request(self, method, params) 102 WebsocketProvider._loop 103 ) --> 104 return future.result() /usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/concurrent/futures/_base.py in result(self, timeout) 425 return self.__get_result() 426 --> 427 self._condition.wait(timeout) 428 429 if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: /usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/threading.py in wait(self, timeout) 293 try: # restore state no matter what (e.g., KeyboardInterrupt) 294 if timeout is None: --> 295 waiter.acquire() 296 gotit = True 297 else: KeyboardInterrupt: ``` I have tried both `wss://ropsten.infura.io/ws` and `wss://ropsten.infura.io/_ws`. I have also posted the same question on Infura forum (https://github.com/INFURA/infura/issues/110). Trying to get help on both sides. ### How can it be fixed? TBD. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `web3/providers/websocket.py` Content: ``` 1 import asyncio 2 import json 3 import logging 4 import os 5 from threading import ( 6 Thread, 7 ) 8 9 import websockets 10 11 from web3.exceptions import ( 12 ValidationError, 13 ) 14 from web3.providers.base import ( 15 JSONBaseProvider, 16 ) 17 18 RESTRICTED_WEBSOCKET_KWARGS = {'uri', 'loop'} 19 20 21 def _start_event_loop(loop): 22 asyncio.set_event_loop(loop) 23 loop.run_forever() 24 loop.close() 25 26 27 def _get_threaded_loop(): 28 new_loop = asyncio.new_event_loop() 29 thread_loop = Thread(target=_start_event_loop, args=(new_loop,), daemon=True) 30 thread_loop.start() 31 return new_loop 32 33 34 def get_default_endpoint(): 35 return os.environ.get('WEB3_WS_PROVIDER_URI', 'ws://127.0.0.1:8546') 36 37 38 class PersistentWebSocket: 39 40 def __init__(self, endpoint_uri, loop, websocket_kwargs): 41 self.ws = None 42 self.endpoint_uri = endpoint_uri 43 self.loop = loop 44 self.websocket_kwargs = websocket_kwargs 45 46 async def __aenter__(self): 47 if self.ws is None: 48 self.ws = await websockets.connect( 49 uri=self.endpoint_uri, loop=self.loop, **self.websocket_kwargs 50 ) 51 return self.ws 52 53 async def __aexit__(self, exc_type, exc_val, exc_tb): 54 if exc_val is not None: 55 try: 56 await self.ws.close() 57 except Exception: 58 pass 59 self.ws = None 60 61 62 class WebsocketProvider(JSONBaseProvider): 63 logger = logging.getLogger("web3.providers.WebsocketProvider") 64 _loop = None 65 66 def __init__(self, endpoint_uri=None, websocket_kwargs=None): 67 self.endpoint_uri = endpoint_uri 68 if self.endpoint_uri is None: 69 self.endpoint_uri = get_default_endpoint() 70 if WebsocketProvider._loop is None: 71 WebsocketProvider._loop = _get_threaded_loop() 72 if websocket_kwargs is None: 73 websocket_kwargs = {} 74 else: 75 found_restricted_keys = set(websocket_kwargs.keys()).intersection( 76 RESTRICTED_WEBSOCKET_KWARGS 77 ) 78 if found_restricted_keys: 79 raise ValidationError( 80 '{0} are not allowed in websocket_kwargs, ' 81 'found: {1}'.format(RESTRICTED_WEBSOCKET_KWARGS, found_restricted_keys) 82 ) 83 self.conn = PersistentWebSocket( 84 self.endpoint_uri, WebsocketProvider._loop, websocket_kwargs 85 ) 86 super().__init__() 87 88 def __str__(self): 89 return "WS connection {0}".format(self.endpoint_uri) 90 91 async def coro_make_request(self, request_data): 92 async with self.conn as conn: 93 await conn.send(request_data) 94 return json.loads(await conn.recv()) 95 96 def make_request(self, method, params): 97 self.logger.debug("Making request WebSocket. URI: %s, " 98 "Method: %s", self.endpoint_uri, method) 99 request_data = self.encode_rpc_request(method, params) 100 future = asyncio.run_coroutine_threadsafe( 101 self.coro_make_request(request_data), 102 WebsocketProvider._loop 103 ) 104 return future.result() 105 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/web3/providers/websocket.py b/web3/providers/websocket.py --- a/web3/providers/websocket.py +++ b/web3/providers/websocket.py @@ -16,6 +16,7 @@ ) RESTRICTED_WEBSOCKET_KWARGS = {'uri', 'loop'} +DEFAULT_WEBSOCKET_TIMEOUT = 10 def _start_event_loop(loop): @@ -63,8 +64,14 @@ logger = logging.getLogger("web3.providers.WebsocketProvider") _loop = None - def __init__(self, endpoint_uri=None, websocket_kwargs=None): + def __init__( + self, + endpoint_uri=None, + websocket_kwargs=None, + websocket_timeout=DEFAULT_WEBSOCKET_TIMEOUT + ): self.endpoint_uri = endpoint_uri + self.websocket_timeout = websocket_timeout if self.endpoint_uri is None: self.endpoint_uri = get_default_endpoint() if WebsocketProvider._loop is None: @@ -90,8 +97,16 @@ async def coro_make_request(self, request_data): async with self.conn as conn: - await conn.send(request_data) - return json.loads(await conn.recv()) + await asyncio.wait_for( + conn.send(request_data), + timeout=self.websocket_timeout + ) + return json.loads( + await asyncio.wait_for( + conn.recv(), + timeout=self.websocket_timeout + ) + ) def make_request(self, method, params): self.logger.debug("Making request WebSocket. URI: %s, "
{"golden_diff": "diff --git a/web3/providers/websocket.py b/web3/providers/websocket.py\n--- a/web3/providers/websocket.py\n+++ b/web3/providers/websocket.py\n@@ -16,6 +16,7 @@\n )\n \n RESTRICTED_WEBSOCKET_KWARGS = {'uri', 'loop'}\n+DEFAULT_WEBSOCKET_TIMEOUT = 10\n \n \n def _start_event_loop(loop):\n@@ -63,8 +64,14 @@\n logger = logging.getLogger(\"web3.providers.WebsocketProvider\")\n _loop = None\n \n- def __init__(self, endpoint_uri=None, websocket_kwargs=None):\n+ def __init__(\n+ self,\n+ endpoint_uri=None,\n+ websocket_kwargs=None,\n+ websocket_timeout=DEFAULT_WEBSOCKET_TIMEOUT\n+ ):\n self.endpoint_uri = endpoint_uri\n+ self.websocket_timeout = websocket_timeout\n if self.endpoint_uri is None:\n self.endpoint_uri = get_default_endpoint()\n if WebsocketProvider._loop is None:\n@@ -90,8 +97,16 @@\n \n async def coro_make_request(self, request_data):\n async with self.conn as conn:\n- await conn.send(request_data)\n- return json.loads(await conn.recv())\n+ await asyncio.wait_for(\n+ conn.send(request_data),\n+ timeout=self.websocket_timeout\n+ )\n+ return json.loads(\n+ await asyncio.wait_for(\n+ conn.recv(),\n+ timeout=self.websocket_timeout\n+ )\n+ )\n \n def make_request(self, method, params):\n self.logger.debug(\"Making request WebSocket. URI: %s, \"\n", "issue": "Connection to ropsten websocket over infura hang.\n* Version: 4.4.1\r\n* Python: 3.6\r\n* OS: osx\r\n\r\n\r\n### What was wrong?\r\n\r\nI wasn't able to connect to infura's ropsten websocket. The process hung when I wanted to create the filter object on pending transactions. It worked for both rinkeby's and mainnet's but just not for ropsten network.\r\n\r\nI was wondering if anyone is having the same issue. Given that I was able to connect to two other networks it makes it less suspicious that the problem is caused by the web3.py\r\n\r\nBelow is how I instantiate the filter by following https://web3py.readthedocs.io/en/stable/filters.html#web3.utils.filters.TransactionFilter\r\n```python\r\nIn [23]: web3 = Web3(Web3.WebsocketProvider('wss://rinkeby.infura.io/_ws'))\r\n\r\nIn [24]: new_transaction_filter = web3.eth.filter('pending')\r\n\r\nIn [25]: new_transaction_filter.get_new_entries()\r\nOut[25]:\r\n[HexBytes('0x4945f39cbeedd911eae93577c75d763d364c37ae01dcb7fb31ff9421a2a92b71'),\r\n HexBytes('0xae0b88b85163d3f0a918236cbf3ec93ccb589c6ad327e937f69b9591680fbf61'),\r\n```\r\n\r\nProcess hung on line 27 for a long time. I interrupted. The stacktrace is below.\r\n```python\r\nIn [26]: web3 = Web3(Web3.WebsocketProvider('wss://ropsten.infura.io/ws'))\r\n\r\nIn [27]: new_transaction_filter = web3.eth.filter('pending')\r\n\r\n^C---------------------------------------------------------------------------\r\nKeyboardInterrupt Traceback (most recent call last)\r\n<ipython-input-3-60afc0d44840> in <module>()\r\n----> 1 new_transaction_filter = web3.eth.filter('pending')\r\n\r\n~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/web3/eth.py in filter(self, filter_params, filter_id)\r\n 319 elif filter_params == \"pending\":\r\n 320 filter_id = self.web3.manager.request_blocking(\r\n--> 321 \"eth_newPendingTransactionFilter\", [],\r\n 322 )\r\n 323 return TransactionFilter(self.web3, filter_id)\r\n\r\n~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/web3/manager.py in request_blocking(self, method, params)\r\n 105 Make a synchronous request using the provider\r\n 106 \"\"\"\r\n--> 107 response = self._make_request(method, params)\r\n 108\r\n 109 if \"error\" in response:\r\n\r\n~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/web3/manager.py in _make_request(self, method, params)\r\n 88 self.logger.debug(\"Making request. Method: %s\", method)\r\n 89 try:\r\n---> 90 return request_func(method, params)\r\n 91 except CannotHandleRequest:\r\n 92 continue\r\n\r\n~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/web3/middleware/gas_price_strategy.py in middleware(method, params)\r\n 16 transaction = assoc(transaction, 'gasPrice', generated_gas_price)\r\n 17 return make_request(method, [transaction])\r\n---> 18 return make_request(method, params)\r\n 19 return middleware\r\n\r\n~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/cytoolz/functoolz.pyx in cytoolz.functoolz.curry.__call__()\r\n\r\n~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/web3/middleware/formatting.py in apply_formatters(method, params, make_request, request_formatters, result_formatters, error_formatters)\r\n 48 response = make_request(method, formatted_params)\r\n 49 else:\r\n---> 50 response = make_request(method, params)\r\n 51\r\n 52 if 'result' in response and method in result_formatters:\r\n\r\n~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/web3/middleware/attrdict.py in middleware(method, params)\r\n 16 \"\"\"\r\n 17 def middleware(method, params):\r\n---> 18 response = make_request(method, params)\r\n 19\r\n 20 if 'result' in response:\r\n\r\n~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/cytoolz/functoolz.pyx in cytoolz.functoolz.curry.__call__()\r\n\r\n~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/web3/middleware/formatting.py in apply_formatters(method, params, make_request, request_formatters, result_formatters, error_formatters)\r\n 48 response = make_request(method, formatted_params)\r\n 49 else:\r\n---> 50 response = make_request(method, params)\r\n 51\r\n 52 if 'result' in response and method in result_formatters:\r\n\r\n~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/web3/middleware/normalize_errors.py in middleware(method, params)\r\n 7 def normalize_errors_middleware(make_request, web3):\r\n 8 def middleware(method, params):\r\n----> 9 result = make_request(method, params)\r\n 10\r\n 11 # As of v1.8, Geth returns errors when you request a\r\n\r\n~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/cytoolz/functoolz.pyx in cytoolz.functoolz.curry.__call__()\r\n\r\n~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/web3/middleware/formatting.py in apply_formatters(method, params, make_request, request_formatters, result_formatters, error_formatters)\r\n 48 response = make_request(method, formatted_params)\r\n 49 else:\r\n---> 50 response = make_request(method, params)\r\n 51\r\n 52 if 'result' in response and method in result_formatters:\r\n\r\n~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/cytoolz/functoolz.pyx in cytoolz.functoolz.curry.__call__()\r\n\r\n~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/web3/middleware/formatting.py in apply_formatters(method, params, make_request, request_formatters, result_formatters, error_formatters)\r\n 48 response = make_request(method, formatted_params)\r\n 49 else:\r\n---> 50 response = make_request(method, params)\r\n 51\r\n 52 if 'result' in response and method in result_formatters:\r\n\r\n~/pg/some_name/virtualenv_run/lib/python3.6/site-packages/web3/providers/websocket.py in make_request(self, method, params)\r\n 102 WebsocketProvider._loop\r\n 103 )\r\n--> 104 return future.result()\r\n\r\n/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/concurrent/futures/_base.py in result(self, timeout)\r\n 425 return self.__get_result()\r\n 426\r\n--> 427 self._condition.wait(timeout)\r\n 428\r\n 429 if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:\r\n\r\n/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/threading.py in wait(self, timeout)\r\n 293 try: # restore state no matter what (e.g., KeyboardInterrupt)\r\n 294 if timeout is None:\r\n--> 295 waiter.acquire()\r\n 296 gotit = True\r\n 297 else:\r\n\r\nKeyboardInterrupt:\r\n```\r\n\r\nI have tried both `wss://ropsten.infura.io/ws` and `wss://ropsten.infura.io/_ws`. I have also posted the same question on Infura forum (https://github.com/INFURA/infura/issues/110). Trying to get help on both sides.\r\n\r\n### How can it be fixed?\r\n\r\nTBD.\r\n\n", "before_files": [{"content": "import asyncio\nimport json\nimport logging\nimport os\nfrom threading import (\n Thread,\n)\n\nimport websockets\n\nfrom web3.exceptions import (\n ValidationError,\n)\nfrom web3.providers.base import (\n JSONBaseProvider,\n)\n\nRESTRICTED_WEBSOCKET_KWARGS = {'uri', 'loop'}\n\n\ndef _start_event_loop(loop):\n asyncio.set_event_loop(loop)\n loop.run_forever()\n loop.close()\n\n\ndef _get_threaded_loop():\n new_loop = asyncio.new_event_loop()\n thread_loop = Thread(target=_start_event_loop, args=(new_loop,), daemon=True)\n thread_loop.start()\n return new_loop\n\n\ndef get_default_endpoint():\n return os.environ.get('WEB3_WS_PROVIDER_URI', 'ws://127.0.0.1:8546')\n\n\nclass PersistentWebSocket:\n\n def __init__(self, endpoint_uri, loop, websocket_kwargs):\n self.ws = None\n self.endpoint_uri = endpoint_uri\n self.loop = loop\n self.websocket_kwargs = websocket_kwargs\n\n async def __aenter__(self):\n if self.ws is None:\n self.ws = await websockets.connect(\n uri=self.endpoint_uri, loop=self.loop, **self.websocket_kwargs\n )\n return self.ws\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n if exc_val is not None:\n try:\n await self.ws.close()\n except Exception:\n pass\n self.ws = None\n\n\nclass WebsocketProvider(JSONBaseProvider):\n logger = logging.getLogger(\"web3.providers.WebsocketProvider\")\n _loop = None\n\n def __init__(self, endpoint_uri=None, websocket_kwargs=None):\n self.endpoint_uri = endpoint_uri\n if self.endpoint_uri is None:\n self.endpoint_uri = get_default_endpoint()\n if WebsocketProvider._loop is None:\n WebsocketProvider._loop = _get_threaded_loop()\n if websocket_kwargs is None:\n websocket_kwargs = {}\n else:\n found_restricted_keys = set(websocket_kwargs.keys()).intersection(\n RESTRICTED_WEBSOCKET_KWARGS\n )\n if found_restricted_keys:\n raise ValidationError(\n '{0} are not allowed in websocket_kwargs, '\n 'found: {1}'.format(RESTRICTED_WEBSOCKET_KWARGS, found_restricted_keys)\n )\n self.conn = PersistentWebSocket(\n self.endpoint_uri, WebsocketProvider._loop, websocket_kwargs\n )\n super().__init__()\n\n def __str__(self):\n return \"WS connection {0}\".format(self.endpoint_uri)\n\n async def coro_make_request(self, request_data):\n async with self.conn as conn:\n await conn.send(request_data)\n return json.loads(await conn.recv())\n\n def make_request(self, method, params):\n self.logger.debug(\"Making request WebSocket. URI: %s, \"\n \"Method: %s\", self.endpoint_uri, method)\n request_data = self.encode_rpc_request(method, params)\n future = asyncio.run_coroutine_threadsafe(\n self.coro_make_request(request_data),\n WebsocketProvider._loop\n )\n return future.result()\n", "path": "web3/providers/websocket.py"}], "after_files": [{"content": "import asyncio\nimport json\nimport logging\nimport os\nfrom threading import (\n Thread,\n)\n\nimport websockets\n\nfrom web3.exceptions import (\n ValidationError,\n)\nfrom web3.providers.base import (\n JSONBaseProvider,\n)\n\nRESTRICTED_WEBSOCKET_KWARGS = {'uri', 'loop'}\nDEFAULT_WEBSOCKET_TIMEOUT = 10\n\n\ndef _start_event_loop(loop):\n asyncio.set_event_loop(loop)\n loop.run_forever()\n loop.close()\n\n\ndef _get_threaded_loop():\n new_loop = asyncio.new_event_loop()\n thread_loop = Thread(target=_start_event_loop, args=(new_loop,), daemon=True)\n thread_loop.start()\n return new_loop\n\n\ndef get_default_endpoint():\n return os.environ.get('WEB3_WS_PROVIDER_URI', 'ws://127.0.0.1:8546')\n\n\nclass PersistentWebSocket:\n\n def __init__(self, endpoint_uri, loop, websocket_kwargs):\n self.ws = None\n self.endpoint_uri = endpoint_uri\n self.loop = loop\n self.websocket_kwargs = websocket_kwargs\n\n async def __aenter__(self):\n if self.ws is None:\n self.ws = await websockets.connect(\n uri=self.endpoint_uri, loop=self.loop, **self.websocket_kwargs\n )\n return self.ws\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n if exc_val is not None:\n try:\n await self.ws.close()\n except Exception:\n pass\n self.ws = None\n\n\nclass WebsocketProvider(JSONBaseProvider):\n logger = logging.getLogger(\"web3.providers.WebsocketProvider\")\n _loop = None\n\n def __init__(\n self,\n endpoint_uri=None,\n websocket_kwargs=None,\n websocket_timeout=DEFAULT_WEBSOCKET_TIMEOUT\n ):\n self.endpoint_uri = endpoint_uri\n self.websocket_timeout = websocket_timeout\n if self.endpoint_uri is None:\n self.endpoint_uri = get_default_endpoint()\n if WebsocketProvider._loop is None:\n WebsocketProvider._loop = _get_threaded_loop()\n if websocket_kwargs is None:\n websocket_kwargs = {}\n else:\n found_restricted_keys = set(websocket_kwargs.keys()).intersection(\n RESTRICTED_WEBSOCKET_KWARGS\n )\n if found_restricted_keys:\n raise ValidationError(\n '{0} are not allowed in websocket_kwargs, '\n 'found: {1}'.format(RESTRICTED_WEBSOCKET_KWARGS, found_restricted_keys)\n )\n self.conn = PersistentWebSocket(\n self.endpoint_uri, WebsocketProvider._loop, websocket_kwargs\n )\n super().__init__()\n\n def __str__(self):\n return \"WS connection {0}\".format(self.endpoint_uri)\n\n async def coro_make_request(self, request_data):\n async with self.conn as conn:\n await asyncio.wait_for(\n conn.send(request_data),\n timeout=self.websocket_timeout\n )\n return json.loads(\n await asyncio.wait_for(\n conn.recv(),\n timeout=self.websocket_timeout\n )\n )\n\n def make_request(self, method, params):\n self.logger.debug(\"Making request WebSocket. URI: %s, \"\n \"Method: %s\", self.endpoint_uri, method)\n request_data = self.encode_rpc_request(method, params)\n future = asyncio.run_coroutine_threadsafe(\n self.coro_make_request(request_data),\n WebsocketProvider._loop\n )\n return future.result()\n", "path": "web3/providers/websocket.py"}]}
3,054
345
gh_patches_debug_5236
rasdani/github-patches
git_diff
internetarchive__openlibrary-9322
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- add a failing author name test <!-- What issue does this PR close? --> Demonstrates a bug in #9003 <!-- What does this PR achieve? [feature|hotfix|fix|refactor] --> `Drini` -> `ini` **Expected:** `Drini` -> `Drini` 😉 ### Technical <!-- What should be noted about the implementation? --> ### Testing <!-- Steps for reviewer to reproduce/verify what this PR does/fixes. --> ### Screenshot <!-- If this PR touches UI, please post evidence (screenshots) of it behaving correctly. --> ### Stakeholders <!-- @ tag the lead (as labeled on the issue) and other stakeholders --> @cdrini @scottbarnes <!-- Attribution Disclaimer: By proposing this pull request, I affirm to have made a best-effort and exercised my discretion to make sure relevant sections of this code which substantially leverage code suggestions, code generation, or code snippets from sources (e.g. Stack Overflow, GitHub) have been annotated with basic attribution so reviewers & contributors may have confidence and access to the correct context to evaluate and use this code. --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `openlibrary/catalog/add_book/load_book.py` Content: ``` 1 from typing import TYPE_CHECKING, Any, Final 2 import web 3 from openlibrary.catalog.utils import flip_name, author_dates_match, key_int 4 from openlibrary.core.helpers import extract_year 5 6 7 if TYPE_CHECKING: 8 from openlibrary.plugins.upstream.models import Author 9 10 11 # Sort by descending length to remove the _longest_ match. 12 # E.g. remove "señorita" and not "señor", when both match. 13 HONORIFICS: Final = sorted( 14 [ 15 'countess', 16 'doctor', 17 'doktor', 18 'dr', 19 'dr.', 20 'frau', 21 'fräulein', 22 'herr', 23 'lady', 24 'lord', 25 'm.', 26 'madame', 27 'mademoiselle', 28 'miss', 29 'mister', 30 'mistress', 31 'mixter', 32 'mlle', 33 'mlle.', 34 'mme', 35 'mme.', 36 'monsieur', 37 'mr', 38 'mr.', 39 'mrs', 40 'mrs.', 41 'ms', 42 'ms.', 43 'mx', 44 'mx.', 45 'professor', 46 'señor', 47 'señora', 48 'señorita', 49 'sir', 50 'sr.', 51 'sra.', 52 'srta.', 53 ], 54 key=lambda x: len(x), 55 reverse=True, 56 ) 57 58 HONORIFC_NAME_EXECPTIONS = frozenset( 59 { 60 "dr. seuss", 61 "dr seuss", 62 "dr oetker", 63 "doctor oetker", 64 } 65 ) 66 67 68 def east_in_by_statement(rec, author): 69 """ 70 Returns False if there is no by_statement in rec. 71 Otherwise returns whether author name uses eastern name order. 72 TODO: elaborate on what this actually means, and how it is used. 73 74 :param dict rec: import source edition record 75 :param dict author: import source author dict: {"name": "Some One"} 76 :rtype: bool 77 """ 78 79 if 'by_statement' not in rec: 80 return False 81 if 'authors' not in rec: 82 return False 83 name = author['name'] 84 flipped = flip_name(name) 85 name = name.replace('.', '') 86 name = name.replace(', ', '') 87 if name == flipped.replace('.', ''): 88 # name was not flipped 89 return False 90 return rec['by_statement'].find(name) != -1 91 92 93 def do_flip(author): 94 """ 95 Given an author import dict, flip its name in place 96 i.e. Smith, John => John Smith 97 98 :param dict author: 99 :rtype: None 100 """ 101 if 'personal_name' in author and author['personal_name'] != author['name']: 102 # Don't flip names if name is more complex than personal_name (legacy behaviour) 103 return 104 first_comma = author['name'].find(', ') 105 if first_comma == -1: 106 return 107 # e.g: Harper, John Murdoch, 1845- 108 if author['name'].find(',', first_comma + 1) != -1: 109 return 110 if author['name'].find('i.e.') != -1: 111 return 112 if author['name'].find('i. e.') != -1: 113 return 114 name = flip_name(author['name']) 115 author['name'] = name 116 if 'personal_name' in author: 117 author['personal_name'] = name 118 119 120 def pick_from_matches(author, match): 121 """ 122 Finds the best match for author from a list of OL authors records, match. 123 124 :param dict author: Author import representation 125 :param list match: List of matching OL author records 126 :rtype: dict 127 :return: A single OL author record from match 128 """ 129 maybe = [] 130 if 'birth_date' in author and 'death_date' in author: 131 maybe = [m for m in match if 'birth_date' in m and 'death_date' in m] 132 elif 'date' in author: 133 maybe = [m for m in match if 'date' in m] 134 if not maybe: 135 maybe = match 136 if len(maybe) == 1: 137 return maybe[0] 138 return min(maybe, key=key_int) 139 140 141 def find_author(author: dict[str, Any]) -> list["Author"]: 142 """ 143 Searches OL for an author by name. 144 145 :param str name: Author's name 146 :rtype: list 147 :return: A list of OL author representations than match name 148 """ 149 150 def walk_redirects(obj, seen): 151 seen.add(obj['key']) 152 while obj['type']['key'] == '/type/redirect': 153 assert obj['location'] != obj['key'] 154 obj = web.ctx.site.get(obj['location']) 155 seen.add(obj['key']) 156 return obj 157 158 # Try for an 'exact' (case-insensitive) name match, but fall back to alternate_names, 159 # then last name with identical birth and death dates (that are not themselves `None` or ''). 160 name = author["name"].replace("*", r"\*") 161 queries = [ 162 {"type": "/type/author", "name~": name}, 163 {"type": "/type/author", "alternate_names~": name}, 164 { 165 "type": "/type/author", 166 "name~": f"* {name.split()[-1]}", 167 "birth_date~": f"*{extract_year(author.get('birth_date', '')) or -1}*", 168 "death_date~": f"*{extract_year(author.get('death_date', '')) or -1}*", 169 }, # Use `-1` to ensure an empty string from extract_year doesn't match empty dates. 170 ] 171 for query in queries: 172 if reply := list(web.ctx.site.things(query)): 173 break 174 175 authors = [web.ctx.site.get(k) for k in reply] 176 if any(a.type.key != '/type/author' for a in authors): 177 seen: set[dict] = set() 178 authors = [walk_redirects(a, seen) for a in authors if a['key'] not in seen] 179 return authors 180 181 182 def find_entity(author: dict[str, Any]) -> "Author | None": 183 """ 184 Looks for an existing Author record in OL by name 185 and returns it if found. 186 187 :param dict author: Author import dict {"name": "Some One"} 188 :return: Existing Author record if found, or None. 189 """ 190 name = author['name'] 191 things = find_author(author) 192 et = author.get('entity_type') 193 if et and et != 'person': 194 if not things: 195 return None 196 db_entity = things[0] 197 assert db_entity['type']['key'] == '/type/author' 198 return db_entity 199 if ', ' in name: 200 flipped_name = flip_name(author["name"]) 201 author_flipped_name = author.copy() 202 things += find_author(author_flipped_name) 203 match = [] 204 seen = set() 205 for a in things: 206 key = a['key'] 207 if key in seen: 208 continue 209 seen.add(key) 210 orig_key = key 211 assert a.type.key == '/type/author' 212 if 'birth_date' in author and 'birth_date' not in a: 213 continue 214 if 'birth_date' not in author and 'birth_date' in a: 215 continue 216 if not author_dates_match(author, a): 217 continue 218 match.append(a) 219 if not match: 220 return None 221 if len(match) == 1: 222 return match[0] 223 return pick_from_matches(author, match) 224 225 226 def remove_author_honorifics(name: str) -> str: 227 """ 228 Remove honorifics from an author's name field. 229 230 If the author's name is only an honorific, it will return the original name. 231 """ 232 if name.casefold() in HONORIFC_NAME_EXECPTIONS: 233 return name 234 235 if honorific := next( 236 ( 237 honorific 238 for honorific in HONORIFICS 239 if name.casefold().startswith(honorific) 240 ), 241 None, 242 ): 243 return name[len(honorific) :].lstrip() or name 244 245 return name 246 247 248 def import_author(author: dict[str, Any], eastern=False) -> "Author | dict[str, Any]": 249 """ 250 Converts an import style new-author dictionary into an 251 Open Library existing author, or new author candidate, representation. 252 Does NOT create new authors. 253 254 :param dict author: Author import record {"name": "Some One"} 255 :param bool eastern: Eastern name order 256 :return: Open Library style Author representation, either existing with "key", 257 or new candidate without "key". 258 """ 259 if existing := find_entity(author): 260 assert existing.type.key == '/type/author' 261 for k in 'last_modified', 'id', 'revision', 'created': 262 if existing.k: 263 del existing.k 264 new = existing 265 if 'death_date' in author and 'death_date' not in existing: 266 new['death_date'] = author['death_date'] 267 return new 268 if author.get('entity_type') != 'org' and not eastern: 269 do_flip(author) 270 a = {'type': {'key': '/type/author'}} 271 for f in 'name', 'title', 'personal_name', 'birth_date', 'death_date', 'date': 272 if f in author: 273 a[f] = author[f] 274 return a 275 276 277 class InvalidLanguage(Exception): 278 def __init__(self, code): 279 self.code = code 280 281 def __str__(self): 282 return "invalid language code: '%s'" % self.code 283 284 285 type_map = {'description': 'text', 'notes': 'text', 'number_of_pages': 'int'} 286 287 288 def build_query(rec): 289 """ 290 Takes an edition record dict, rec, and returns an Open Library edition 291 suitable for saving. 292 293 :param dict rec: Edition record to add to Open Library 294 :rtype: dict 295 :return: Open Library style edition representation 296 """ 297 book = { 298 'type': {'key': '/type/edition'}, 299 } 300 301 for k, v in rec.items(): 302 if k == 'authors': 303 if v and v[0]: 304 book['authors'] = [] 305 for author in v: 306 author['name'] = remove_author_honorifics(author['name']) 307 east = east_in_by_statement(rec, author) 308 book['authors'].append(import_author(author, eastern=east)) 309 continue 310 if k in ('languages', 'translated_from'): 311 for language in v: 312 if web.ctx.site.get('/languages/' + language) is None: 313 raise InvalidLanguage(language) 314 book[k] = [{'key': '/languages/' + language} for language in v] 315 continue 316 if k in type_map: 317 t = '/type/' + type_map[k] 318 if isinstance(v, list): 319 book[k] = [{'type': t, 'value': i} for i in v] 320 else: 321 book[k] = {'type': t, 'value': v} 322 else: 323 book[k] = v 324 return book 325 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/openlibrary/catalog/add_book/load_book.py b/openlibrary/catalog/add_book/load_book.py --- a/openlibrary/catalog/add_book/load_book.py +++ b/openlibrary/catalog/add_book/load_book.py @@ -236,11 +236,11 @@ ( honorific for honorific in HONORIFICS - if name.casefold().startswith(honorific) + if name.casefold().startswith(f"{honorific} ") # Note the trailing space. ), None, ): - return name[len(honorific) :].lstrip() or name + return name[len(f"{honorific} ") :].lstrip() or name return name
{"golden_diff": "diff --git a/openlibrary/catalog/add_book/load_book.py b/openlibrary/catalog/add_book/load_book.py\n--- a/openlibrary/catalog/add_book/load_book.py\n+++ b/openlibrary/catalog/add_book/load_book.py\n@@ -236,11 +236,11 @@\n (\n honorific\n for honorific in HONORIFICS\n- if name.casefold().startswith(honorific)\n+ if name.casefold().startswith(f\"{honorific} \") # Note the trailing space.\n ),\n None,\n ):\n- return name[len(honorific) :].lstrip() or name\n+ return name[len(f\"{honorific} \") :].lstrip() or name\n \n return name\n", "issue": "add a failing author name test\n<!-- What issue does this PR close? -->\r\nDemonstrates a bug in #9003\r\n\r\n<!-- What does this PR achieve? [feature|hotfix|fix|refactor] -->\r\n\r\n`Drini` -> `ini`\r\n\r\n**Expected:**\r\n`Drini` -> `Drini`\r\n\r\n\ud83d\ude09 \r\n\r\n### Technical\r\n<!-- What should be noted about the implementation? -->\r\n\r\n### Testing\r\n<!-- Steps for reviewer to reproduce/verify what this PR does/fixes. -->\r\n\r\n### Screenshot\r\n<!-- If this PR touches UI, please post evidence (screenshots) of it behaving correctly. -->\r\n\r\n### Stakeholders\r\n<!-- @ tag the lead (as labeled on the issue) and other stakeholders -->\r\n@cdrini \r\n@scottbarnes \r\n\r\n<!-- Attribution Disclaimer: By proposing this pull request, I affirm to have made a best-effort and exercised my discretion to make sure relevant sections of this code which substantially leverage code suggestions, code generation, or code snippets from sources (e.g. Stack Overflow, GitHub) have been annotated with basic attribution so reviewers & contributors may have confidence and access to the correct context to evaluate and use this code. -->\r\n\n", "before_files": [{"content": "from typing import TYPE_CHECKING, Any, Final\nimport web\nfrom openlibrary.catalog.utils import flip_name, author_dates_match, key_int\nfrom openlibrary.core.helpers import extract_year\n\n\nif TYPE_CHECKING:\n from openlibrary.plugins.upstream.models import Author\n\n\n# Sort by descending length to remove the _longest_ match.\n# E.g. remove \"se\u00f1orita\" and not \"se\u00f1or\", when both match.\nHONORIFICS: Final = sorted(\n [\n 'countess',\n 'doctor',\n 'doktor',\n 'dr',\n 'dr.',\n 'frau',\n 'fr\u00e4ulein',\n 'herr',\n 'lady',\n 'lord',\n 'm.',\n 'madame',\n 'mademoiselle',\n 'miss',\n 'mister',\n 'mistress',\n 'mixter',\n 'mlle',\n 'mlle.',\n 'mme',\n 'mme.',\n 'monsieur',\n 'mr',\n 'mr.',\n 'mrs',\n 'mrs.',\n 'ms',\n 'ms.',\n 'mx',\n 'mx.',\n 'professor',\n 'se\u00f1or',\n 'se\u00f1ora',\n 'se\u00f1orita',\n 'sir',\n 'sr.',\n 'sra.',\n 'srta.',\n ],\n key=lambda x: len(x),\n reverse=True,\n)\n\nHONORIFC_NAME_EXECPTIONS = frozenset(\n {\n \"dr. seuss\",\n \"dr seuss\",\n \"dr oetker\",\n \"doctor oetker\",\n }\n)\n\n\ndef east_in_by_statement(rec, author):\n \"\"\"\n Returns False if there is no by_statement in rec.\n Otherwise returns whether author name uses eastern name order.\n TODO: elaborate on what this actually means, and how it is used.\n\n :param dict rec: import source edition record\n :param dict author: import source author dict: {\"name\": \"Some One\"}\n :rtype: bool\n \"\"\"\n\n if 'by_statement' not in rec:\n return False\n if 'authors' not in rec:\n return False\n name = author['name']\n flipped = flip_name(name)\n name = name.replace('.', '')\n name = name.replace(', ', '')\n if name == flipped.replace('.', ''):\n # name was not flipped\n return False\n return rec['by_statement'].find(name) != -1\n\n\ndef do_flip(author):\n \"\"\"\n Given an author import dict, flip its name in place\n i.e. Smith, John => John Smith\n\n :param dict author:\n :rtype: None\n \"\"\"\n if 'personal_name' in author and author['personal_name'] != author['name']:\n # Don't flip names if name is more complex than personal_name (legacy behaviour)\n return\n first_comma = author['name'].find(', ')\n if first_comma == -1:\n return\n # e.g: Harper, John Murdoch, 1845-\n if author['name'].find(',', first_comma + 1) != -1:\n return\n if author['name'].find('i.e.') != -1:\n return\n if author['name'].find('i. e.') != -1:\n return\n name = flip_name(author['name'])\n author['name'] = name\n if 'personal_name' in author:\n author['personal_name'] = name\n\n\ndef pick_from_matches(author, match):\n \"\"\"\n Finds the best match for author from a list of OL authors records, match.\n\n :param dict author: Author import representation\n :param list match: List of matching OL author records\n :rtype: dict\n :return: A single OL author record from match\n \"\"\"\n maybe = []\n if 'birth_date' in author and 'death_date' in author:\n maybe = [m for m in match if 'birth_date' in m and 'death_date' in m]\n elif 'date' in author:\n maybe = [m for m in match if 'date' in m]\n if not maybe:\n maybe = match\n if len(maybe) == 1:\n return maybe[0]\n return min(maybe, key=key_int)\n\n\ndef find_author(author: dict[str, Any]) -> list[\"Author\"]:\n \"\"\"\n Searches OL for an author by name.\n\n :param str name: Author's name\n :rtype: list\n :return: A list of OL author representations than match name\n \"\"\"\n\n def walk_redirects(obj, seen):\n seen.add(obj['key'])\n while obj['type']['key'] == '/type/redirect':\n assert obj['location'] != obj['key']\n obj = web.ctx.site.get(obj['location'])\n seen.add(obj['key'])\n return obj\n\n # Try for an 'exact' (case-insensitive) name match, but fall back to alternate_names,\n # then last name with identical birth and death dates (that are not themselves `None` or '').\n name = author[\"name\"].replace(\"*\", r\"\\*\")\n queries = [\n {\"type\": \"/type/author\", \"name~\": name},\n {\"type\": \"/type/author\", \"alternate_names~\": name},\n {\n \"type\": \"/type/author\",\n \"name~\": f\"* {name.split()[-1]}\",\n \"birth_date~\": f\"*{extract_year(author.get('birth_date', '')) or -1}*\",\n \"death_date~\": f\"*{extract_year(author.get('death_date', '')) or -1}*\",\n }, # Use `-1` to ensure an empty string from extract_year doesn't match empty dates.\n ]\n for query in queries:\n if reply := list(web.ctx.site.things(query)):\n break\n\n authors = [web.ctx.site.get(k) for k in reply]\n if any(a.type.key != '/type/author' for a in authors):\n seen: set[dict] = set()\n authors = [walk_redirects(a, seen) for a in authors if a['key'] not in seen]\n return authors\n\n\ndef find_entity(author: dict[str, Any]) -> \"Author | None\":\n \"\"\"\n Looks for an existing Author record in OL by name\n and returns it if found.\n\n :param dict author: Author import dict {\"name\": \"Some One\"}\n :return: Existing Author record if found, or None.\n \"\"\"\n name = author['name']\n things = find_author(author)\n et = author.get('entity_type')\n if et and et != 'person':\n if not things:\n return None\n db_entity = things[0]\n assert db_entity['type']['key'] == '/type/author'\n return db_entity\n if ', ' in name:\n flipped_name = flip_name(author[\"name\"])\n author_flipped_name = author.copy()\n things += find_author(author_flipped_name)\n match = []\n seen = set()\n for a in things:\n key = a['key']\n if key in seen:\n continue\n seen.add(key)\n orig_key = key\n assert a.type.key == '/type/author'\n if 'birth_date' in author and 'birth_date' not in a:\n continue\n if 'birth_date' not in author and 'birth_date' in a:\n continue\n if not author_dates_match(author, a):\n continue\n match.append(a)\n if not match:\n return None\n if len(match) == 1:\n return match[0]\n return pick_from_matches(author, match)\n\n\ndef remove_author_honorifics(name: str) -> str:\n \"\"\"\n Remove honorifics from an author's name field.\n\n If the author's name is only an honorific, it will return the original name.\n \"\"\"\n if name.casefold() in HONORIFC_NAME_EXECPTIONS:\n return name\n\n if honorific := next(\n (\n honorific\n for honorific in HONORIFICS\n if name.casefold().startswith(honorific)\n ),\n None,\n ):\n return name[len(honorific) :].lstrip() or name\n\n return name\n\n\ndef import_author(author: dict[str, Any], eastern=False) -> \"Author | dict[str, Any]\":\n \"\"\"\n Converts an import style new-author dictionary into an\n Open Library existing author, or new author candidate, representation.\n Does NOT create new authors.\n\n :param dict author: Author import record {\"name\": \"Some One\"}\n :param bool eastern: Eastern name order\n :return: Open Library style Author representation, either existing with \"key\",\n or new candidate without \"key\".\n \"\"\"\n if existing := find_entity(author):\n assert existing.type.key == '/type/author'\n for k in 'last_modified', 'id', 'revision', 'created':\n if existing.k:\n del existing.k\n new = existing\n if 'death_date' in author and 'death_date' not in existing:\n new['death_date'] = author['death_date']\n return new\n if author.get('entity_type') != 'org' and not eastern:\n do_flip(author)\n a = {'type': {'key': '/type/author'}}\n for f in 'name', 'title', 'personal_name', 'birth_date', 'death_date', 'date':\n if f in author:\n a[f] = author[f]\n return a\n\n\nclass InvalidLanguage(Exception):\n def __init__(self, code):\n self.code = code\n\n def __str__(self):\n return \"invalid language code: '%s'\" % self.code\n\n\ntype_map = {'description': 'text', 'notes': 'text', 'number_of_pages': 'int'}\n\n\ndef build_query(rec):\n \"\"\"\n Takes an edition record dict, rec, and returns an Open Library edition\n suitable for saving.\n\n :param dict rec: Edition record to add to Open Library\n :rtype: dict\n :return: Open Library style edition representation\n \"\"\"\n book = {\n 'type': {'key': '/type/edition'},\n }\n\n for k, v in rec.items():\n if k == 'authors':\n if v and v[0]:\n book['authors'] = []\n for author in v:\n author['name'] = remove_author_honorifics(author['name'])\n east = east_in_by_statement(rec, author)\n book['authors'].append(import_author(author, eastern=east))\n continue\n if k in ('languages', 'translated_from'):\n for language in v:\n if web.ctx.site.get('/languages/' + language) is None:\n raise InvalidLanguage(language)\n book[k] = [{'key': '/languages/' + language} for language in v]\n continue\n if k in type_map:\n t = '/type/' + type_map[k]\n if isinstance(v, list):\n book[k] = [{'type': t, 'value': i} for i in v]\n else:\n book[k] = {'type': t, 'value': v}\n else:\n book[k] = v\n return book\n", "path": "openlibrary/catalog/add_book/load_book.py"}], "after_files": [{"content": "from typing import TYPE_CHECKING, Any, Final\nimport web\nfrom openlibrary.catalog.utils import flip_name, author_dates_match, key_int\nfrom openlibrary.core.helpers import extract_year\n\n\nif TYPE_CHECKING:\n from openlibrary.plugins.upstream.models import Author\n\n\n# Sort by descending length to remove the _longest_ match.\n# E.g. remove \"se\u00f1orita\" and not \"se\u00f1or\", when both match.\nHONORIFICS: Final = sorted(\n [\n 'countess',\n 'doctor',\n 'doktor',\n 'dr',\n 'dr.',\n 'frau',\n 'fr\u00e4ulein',\n 'herr',\n 'lady',\n 'lord',\n 'm.',\n 'madame',\n 'mademoiselle',\n 'miss',\n 'mister',\n 'mistress',\n 'mixter',\n 'mlle',\n 'mlle.',\n 'mme',\n 'mme.',\n 'monsieur',\n 'mr',\n 'mr.',\n 'mrs',\n 'mrs.',\n 'ms',\n 'ms.',\n 'mx',\n 'mx.',\n 'professor',\n 'se\u00f1or',\n 'se\u00f1ora',\n 'se\u00f1orita',\n 'sir',\n 'sr.',\n 'sra.',\n 'srta.',\n ],\n key=lambda x: len(x),\n reverse=True,\n)\n\nHONORIFC_NAME_EXECPTIONS = frozenset(\n {\n \"dr. seuss\",\n \"dr seuss\",\n \"dr oetker\",\n \"doctor oetker\",\n }\n)\n\n\ndef east_in_by_statement(rec, author):\n \"\"\"\n Returns False if there is no by_statement in rec.\n Otherwise returns whether author name uses eastern name order.\n TODO: elaborate on what this actually means, and how it is used.\n\n :param dict rec: import source edition record\n :param dict author: import source author dict: {\"name\": \"Some One\"}\n :rtype: bool\n \"\"\"\n\n if 'by_statement' not in rec:\n return False\n if 'authors' not in rec:\n return False\n name = author['name']\n flipped = flip_name(name)\n name = name.replace('.', '')\n name = name.replace(', ', '')\n if name == flipped.replace('.', ''):\n # name was not flipped\n return False\n return rec['by_statement'].find(name) != -1\n\n\ndef do_flip(author):\n \"\"\"\n Given an author import dict, flip its name in place\n i.e. Smith, John => John Smith\n\n :param dict author:\n :rtype: None\n \"\"\"\n if 'personal_name' in author and author['personal_name'] != author['name']:\n # Don't flip names if name is more complex than personal_name (legacy behaviour)\n return\n first_comma = author['name'].find(', ')\n if first_comma == -1:\n return\n # e.g: Harper, John Murdoch, 1845-\n if author['name'].find(',', first_comma + 1) != -1:\n return\n if author['name'].find('i.e.') != -1:\n return\n if author['name'].find('i. e.') != -1:\n return\n name = flip_name(author['name'])\n author['name'] = name\n if 'personal_name' in author:\n author['personal_name'] = name\n\n\ndef pick_from_matches(author, match):\n \"\"\"\n Finds the best match for author from a list of OL authors records, match.\n\n :param dict author: Author import representation\n :param list match: List of matching OL author records\n :rtype: dict\n :return: A single OL author record from match\n \"\"\"\n maybe = []\n if 'birth_date' in author and 'death_date' in author:\n maybe = [m for m in match if 'birth_date' in m and 'death_date' in m]\n elif 'date' in author:\n maybe = [m for m in match if 'date' in m]\n if not maybe:\n maybe = match\n if len(maybe) == 1:\n return maybe[0]\n return min(maybe, key=key_int)\n\n\ndef find_author(author: dict[str, Any]) -> list[\"Author\"]:\n \"\"\"\n Searches OL for an author by name.\n\n :param str name: Author's name\n :rtype: list\n :return: A list of OL author representations than match name\n \"\"\"\n\n def walk_redirects(obj, seen):\n seen.add(obj['key'])\n while obj['type']['key'] == '/type/redirect':\n assert obj['location'] != obj['key']\n obj = web.ctx.site.get(obj['location'])\n seen.add(obj['key'])\n return obj\n\n # Try for an 'exact' (case-insensitive) name match, but fall back to alternate_names,\n # then last name with identical birth and death dates (that are not themselves `None` or '').\n name = author[\"name\"].replace(\"*\", r\"\\*\")\n queries = [\n {\"type\": \"/type/author\", \"name~\": name},\n {\"type\": \"/type/author\", \"alternate_names~\": name},\n {\n \"type\": \"/type/author\",\n \"name~\": f\"* {name.split()[-1]}\",\n \"birth_date~\": f\"*{extract_year(author.get('birth_date', '')) or -1}*\",\n \"death_date~\": f\"*{extract_year(author.get('death_date', '')) or -1}*\",\n }, # Use `-1` to ensure an empty string from extract_year doesn't match empty dates.\n ]\n for query in queries:\n if reply := list(web.ctx.site.things(query)):\n break\n\n authors = [web.ctx.site.get(k) for k in reply]\n if any(a.type.key != '/type/author' for a in authors):\n seen: set[dict] = set()\n authors = [walk_redirects(a, seen) for a in authors if a['key'] not in seen]\n return authors\n\n\ndef find_entity(author: dict[str, Any]) -> \"Author | None\":\n \"\"\"\n Looks for an existing Author record in OL by name\n and returns it if found.\n\n :param dict author: Author import dict {\"name\": \"Some One\"}\n :return: Existing Author record if found, or None.\n \"\"\"\n name = author['name']\n things = find_author(author)\n et = author.get('entity_type')\n if et and et != 'person':\n if not things:\n return None\n db_entity = things[0]\n assert db_entity['type']['key'] == '/type/author'\n return db_entity\n if ', ' in name:\n flipped_name = flip_name(author[\"name\"])\n author_flipped_name = author.copy()\n things += find_author(author_flipped_name)\n match = []\n seen = set()\n for a in things:\n key = a['key']\n if key in seen:\n continue\n seen.add(key)\n orig_key = key\n assert a.type.key == '/type/author'\n if 'birth_date' in author and 'birth_date' not in a:\n continue\n if 'birth_date' not in author and 'birth_date' in a:\n continue\n if not author_dates_match(author, a):\n continue\n match.append(a)\n if not match:\n return None\n if len(match) == 1:\n return match[0]\n return pick_from_matches(author, match)\n\n\ndef remove_author_honorifics(name: str) -> str:\n \"\"\"\n Remove honorifics from an author's name field.\n\n If the author's name is only an honorific, it will return the original name.\n \"\"\"\n if name.casefold() in HONORIFC_NAME_EXECPTIONS:\n return name\n\n if honorific := next(\n (\n honorific\n for honorific in HONORIFICS\n if name.casefold().startswith(f\"{honorific} \") # Note the trailing space.\n ),\n None,\n ):\n return name[len(f\"{honorific} \") :].lstrip() or name\n\n return name\n\n\ndef import_author(author: dict[str, Any], eastern=False) -> \"Author | dict[str, Any]\":\n \"\"\"\n Converts an import style new-author dictionary into an\n Open Library existing author, or new author candidate, representation.\n Does NOT create new authors.\n\n :param dict author: Author import record {\"name\": \"Some One\"}\n :param bool eastern: Eastern name order\n :return: Open Library style Author representation, either existing with \"key\",\n or new candidate without \"key\".\n \"\"\"\n if existing := find_entity(author):\n assert existing.type.key == '/type/author'\n for k in 'last_modified', 'id', 'revision', 'created':\n if existing.k:\n del existing.k\n new = existing\n if 'death_date' in author and 'death_date' not in existing:\n new['death_date'] = author['death_date']\n return new\n if author.get('entity_type') != 'org' and not eastern:\n do_flip(author)\n a = {'type': {'key': '/type/author'}}\n for f in 'name', 'title', 'personal_name', 'birth_date', 'death_date', 'date':\n if f in author:\n a[f] = author[f]\n return a\n\n\nclass InvalidLanguage(Exception):\n def __init__(self, code):\n self.code = code\n\n def __str__(self):\n return \"invalid language code: '%s'\" % self.code\n\n\ntype_map = {'description': 'text', 'notes': 'text', 'number_of_pages': 'int'}\n\n\ndef build_query(rec):\n \"\"\"\n Takes an edition record dict, rec, and returns an Open Library edition\n suitable for saving.\n\n :param dict rec: Edition record to add to Open Library\n :rtype: dict\n :return: Open Library style edition representation\n \"\"\"\n book = {\n 'type': {'key': '/type/edition'},\n }\n\n for k, v in rec.items():\n if k == 'authors':\n if v and v[0]:\n book['authors'] = []\n for author in v:\n author['name'] = remove_author_honorifics(author['name'])\n east = east_in_by_statement(rec, author)\n book['authors'].append(import_author(author, eastern=east))\n continue\n if k in ('languages', 'translated_from'):\n for language in v:\n if web.ctx.site.get('/languages/' + language) is None:\n raise InvalidLanguage(language)\n book[k] = [{'key': '/languages/' + language} for language in v]\n continue\n if k in type_map:\n t = '/type/' + type_map[k]\n if isinstance(v, list):\n book[k] = [{'type': t, 'value': i} for i in v]\n else:\n book[k] = {'type': t, 'value': v}\n else:\n book[k] = v\n return book\n", "path": "openlibrary/catalog/add_book/load_book.py"}]}
3,815
159
gh_patches_debug_6778
rasdani/github-patches
git_diff
flairNLP__flair-160
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Flair depends on AWS CLI (boto library etc) --- should be removed/optional I assume that the usual Flair user does not need any AWS dependency, therefore the hard AWS CLI dependency (defined in `setup.py`) should be removed or at least made optional. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 from setuptools import setup, find_packages 2 3 setup( 4 name='flair', 5 version='0.3.0', 6 description='A very simple framework for state-of-the-art NLP', 7 long_description=open("README.md", encoding='utf-8').read(), 8 long_description_content_type="text/markdown", 9 author='Alan Akbik', 10 author_email='[email protected]', 11 url='https://github.com/zalandoresearch/flair', 12 packages=find_packages(exclude='test'), # same as name 13 license='MIT', 14 install_requires=[ 15 'torch==0.4.1', 16 'awscli==1.14.32', 17 'gensim==3.4.0', 18 'typing==3.6.4', 19 'tqdm==4.23.4', 20 'segtok==1.5.6', 21 'matplotlib==3.0.0', 22 'mpld3==0.3', 23 'jinja2==2.10', 24 'sklearn', 25 'sqlitedict==1.6.0', 26 ], 27 include_package_data=True, 28 python_requires='>=3.6', 29 ) 30 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -13,14 +13,12 @@ license='MIT', install_requires=[ 'torch==0.4.1', - 'awscli==1.14.32', 'gensim==3.4.0', 'typing==3.6.4', 'tqdm==4.23.4', 'segtok==1.5.6', 'matplotlib==3.0.0', 'mpld3==0.3', - 'jinja2==2.10', 'sklearn', 'sqlitedict==1.6.0', ],
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -13,14 +13,12 @@\n license='MIT',\n install_requires=[\n 'torch==0.4.1',\n- 'awscli==1.14.32',\n 'gensim==3.4.0',\n 'typing==3.6.4',\n 'tqdm==4.23.4',\n 'segtok==1.5.6',\n 'matplotlib==3.0.0',\n 'mpld3==0.3',\n- 'jinja2==2.10',\n 'sklearn',\n 'sqlitedict==1.6.0',\n ],\n", "issue": "Flair depends on AWS CLI (boto library etc) --- should be removed/optional\nI assume that the usual Flair user does not need any AWS dependency, therefore the hard AWS CLI dependency (defined in `setup.py`) should be removed or at least made optional.\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\nsetup(\n name='flair',\n version='0.3.0',\n description='A very simple framework for state-of-the-art NLP',\n long_description=open(\"README.md\", encoding='utf-8').read(),\n long_description_content_type=\"text/markdown\",\n author='Alan Akbik',\n author_email='[email protected]',\n url='https://github.com/zalandoresearch/flair',\n packages=find_packages(exclude='test'), # same as name\n license='MIT',\n install_requires=[\n 'torch==0.4.1',\n 'awscli==1.14.32',\n 'gensim==3.4.0',\n 'typing==3.6.4',\n 'tqdm==4.23.4',\n 'segtok==1.5.6',\n 'matplotlib==3.0.0',\n 'mpld3==0.3',\n 'jinja2==2.10',\n 'sklearn',\n 'sqlitedict==1.6.0',\n ],\n include_package_data=True,\n python_requires='>=3.6',\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\n\nsetup(\n name='flair',\n version='0.3.0',\n description='A very simple framework for state-of-the-art NLP',\n long_description=open(\"README.md\", encoding='utf-8').read(),\n long_description_content_type=\"text/markdown\",\n author='Alan Akbik',\n author_email='[email protected]',\n url='https://github.com/zalandoresearch/flair',\n packages=find_packages(exclude='test'), # same as name\n license='MIT',\n install_requires=[\n 'torch==0.4.1',\n 'gensim==3.4.0',\n 'typing==3.6.4',\n 'tqdm==4.23.4',\n 'segtok==1.5.6',\n 'matplotlib==3.0.0',\n 'mpld3==0.3',\n 'sklearn',\n 'sqlitedict==1.6.0',\n ],\n include_package_data=True,\n python_requires='>=3.6',\n)\n", "path": "setup.py"}]}
625
164
gh_patches_debug_25887
rasdani/github-patches
git_diff
opendatacube__datacube-core-362
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Drop Support for Python 2 I'd like to propose that future development on Data Cube Core stop supporting Python 2 after the beginning of February 2018. We can continue to support Python 2 for bug fix releases of version 1.5.x, but beyond that it is an ongoing drain on the limited developer resources. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 import versioneer 4 from setuptools import setup, find_packages 5 6 tests_require = [ 7 'compliance-checker', 8 'hypothesis', 9 'mock', 10 'objgraph', 11 'pycodestyle', 12 'pylint', 13 'pytest', 14 'pytest-cov', 15 'pytest-timeout', 16 ] 17 18 extras_require = { 19 'performance': ['ciso8601', 'bottleneck'], 20 'interactive': ['matplotlib', 'fiona'], 21 'distributed': ['distributed', 'dask[distributed]'], 22 'analytics': ['scipy', 'pyparsing', 'numexpr'], 23 'doc': ['Sphinx', 'setuptools'], 24 'replicas': ['paramiko', 'sshtunnel', 'tqdm'], 25 'celery': ['celery>=4', 'redis'], 26 's3': ['boto3==1.4.3', 'SharedArray', 'pathos', 'zstandard'], 27 'test': tests_require, 28 } 29 # An 'all' option, following ipython naming conventions. 30 extras_require['all'] = sorted(set(sum(extras_require.values(), []))) 31 32 setup( 33 name='datacube', 34 version=versioneer.get_version(), 35 cmdclass=versioneer.get_cmdclass(), 36 37 url='https://github.com/opendatacube/datacube-core', 38 author='AGDC Collaboration', 39 maintainer='AGDC Collaboration', 40 maintainer_email='', 41 description='An analysis environment for satellite and other earth observation data', 42 long_description=open('README.rst').read(), 43 license='Apache License 2.0', 44 classifiers=[ 45 "Development Status :: 4 - Beta", 46 "Intended Audience :: Developers", 47 "Intended Audience :: Science/Research", 48 "License :: OSI Approved :: Apache Software License", 49 "Natural Language :: English", 50 "Operating System :: MacOS :: MacOS X", 51 "Operating System :: POSIX", 52 "Operating System :: POSIX :: BSD", 53 "Operating System :: POSIX :: Linux", 54 "Operating System :: Microsoft :: Windows", 55 "Programming Language :: Python", 56 "Programming Language :: Python :: 2", 57 "Programming Language :: Python :: 2.7", 58 "Programming Language :: Python :: 3", 59 "Programming Language :: Python :: 3.5", 60 "Topic :: Scientific/Engineering :: GIS", 61 "Topic :: Scientific/Engineering :: Information Analysis", 62 ], 63 64 packages=find_packages( 65 exclude=('tests', 'tests.*', 66 'integration_tests', 'integration_tests.*') 67 ), 68 package_data={ 69 '': ['*.yaml', '*/*.yaml'], 70 }, 71 scripts=[ 72 'datacube_apps/scripts/pbs_helpers.sh' 73 ], 74 setup_requires=[ 75 'pytest-runner' 76 ], 77 install_requires=[ 78 'affine', 79 'cachetools', 80 'click>=5.0', 81 'cloudpickle>=0.4', 82 'dask[array]', 83 'gdal>=1.9', 84 'jsonschema', 85 'netcdf4', 86 'numpy', 87 'pathlib;python_version<"3"', 88 'psycopg2', 89 'pypeg2', 90 'python-dateutil', 91 'pyyaml', 92 'rasterio>=0.9a10', # required for zip reading, 0.9 gets around 1.0a ordering problems 93 'singledispatch', 94 'sqlalchemy', 95 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost 96 ], 97 extras_require=extras_require, 98 tests_require=tests_require, 99 100 entry_points={ 101 'console_scripts': [ 102 'datacube = datacube.scripts.cli_app:cli', 103 'datacube-search = datacube.scripts.search_tool:cli', 104 'datacube-stacker = datacube_apps.stacker:main', 105 'datacube-worker = datacube.execution.worker:main', 106 'datacube-fixer = datacube_apps.stacker:fixer_main', 107 'datacube-ncml = datacube_apps.ncml:ncml_app', 108 'pixeldrill = datacube_apps.pixeldrill:main [interactive]', 109 'movie_generator = datacube_apps.movie_generator:main', 110 'datacube-simple-replica = datacube_apps.simple_replica:replicate [replicas]' 111 ], 112 'datacube.plugins.io.read': [ 113 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init', 114 's3aio = datacube.drivers.s3.driver:reader_driver_init', 115 's3aio_test = datacube.drivers.s3.driver:reader_test_driver_init' 116 ], 117 'datacube.plugins.io.write': [ 118 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init', 119 's3aio = datacube.drivers.s3.driver:writer_driver_init', 120 's3aio_test = datacube.drivers.s3.driver:writer_test_driver_init', 121 ], 122 'datacube.plugins.index': [ 123 'default = datacube.index.index:index_driver_init', 124 's3aio_index = datacube.drivers.s3aio_index:index_driver_init', 125 ], 126 }, 127 ) 128 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -33,6 +33,7 @@ name='datacube', version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), + python_requires='>3.5.2', url='https://github.com/opendatacube/datacube-core', author='AGDC Collaboration', @@ -53,10 +54,9 @@ "Operating System :: POSIX :: Linux", "Operating System :: Microsoft :: Windows", "Programming Language :: Python", - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.5", + "Programming Language :: Python :: 3.6", "Topic :: Scientific/Engineering :: GIS", "Topic :: Scientific/Engineering :: Information Analysis", ], @@ -84,7 +84,6 @@ 'jsonschema', 'netcdf4', 'numpy', - 'pathlib;python_version<"3"', 'psycopg2', 'pypeg2', 'python-dateutil',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,6 +33,7 @@\n name='datacube',\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n+ python_requires='>3.5.2',\n \n url='https://github.com/opendatacube/datacube-core',\n author='AGDC Collaboration',\n@@ -53,10 +54,9 @@\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n- \"Programming Language :: Python :: 2\",\n- \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n+ \"Programming Language :: Python :: 3.6\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n@@ -84,7 +84,6 @@\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n- 'pathlib;python_version<\"3\"',\n 'psycopg2',\n 'pypeg2',\n 'python-dateutil',\n", "issue": "Drop Support for Python 2\nI'd like to propose that future development on Data Cube Core stop supporting Python 2 after the beginning of February 2018.\r\n\r\nWe can continue to support Python 2 for bug fix releases of version 1.5.x, but beyond that it is an ongoing drain on the limited developer resources.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport versioneer\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'compliance-checker',\n 'hypothesis',\n 'mock',\n 'objgraph',\n 'pycodestyle',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-timeout',\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'interactive': ['matplotlib', 'fiona'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'analytics': ['scipy', 'pyparsing', 'numexpr'],\n 'doc': ['Sphinx', 'setuptools'],\n 'replicas': ['paramiko', 'sshtunnel', 'tqdm'],\n 'celery': ['celery>=4', 'redis'],\n 's3': ['boto3==1.4.3', 'SharedArray', 'pathos', 'zstandard'],\n 'test': tests_require,\n}\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='datacube',\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n\n url='https://github.com/opendatacube/datacube-core',\n author='AGDC Collaboration',\n maintainer='AGDC Collaboration',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n },\n scripts=[\n 'datacube_apps/scripts/pbs_helpers.sh'\n ],\n setup_requires=[\n 'pytest-runner'\n ],\n install_requires=[\n 'affine',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'gdal>=1.9',\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n 'pathlib;python_version<\"3\"',\n 'psycopg2',\n 'pypeg2',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=0.9a10', # required for zip reading, 0.9 gets around 1.0a ordering problems\n 'singledispatch',\n 'sqlalchemy',\n 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube-stacker = datacube_apps.stacker:main',\n 'datacube-worker = datacube.execution.worker:main',\n 'datacube-fixer = datacube_apps.stacker:fixer_main',\n 'datacube-ncml = datacube_apps.ncml:ncml_app',\n 'pixeldrill = datacube_apps.pixeldrill:main [interactive]',\n 'movie_generator = datacube_apps.movie_generator:main',\n 'datacube-simple-replica = datacube_apps.simple_replica:replicate [replicas]'\n ],\n 'datacube.plugins.io.read': [\n 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',\n 's3aio = datacube.drivers.s3.driver:reader_driver_init',\n 's3aio_test = datacube.drivers.s3.driver:reader_test_driver_init'\n ],\n 'datacube.plugins.io.write': [\n 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',\n 's3aio = datacube.drivers.s3.driver:writer_driver_init',\n 's3aio_test = datacube.drivers.s3.driver:writer_test_driver_init',\n ],\n 'datacube.plugins.index': [\n 'default = datacube.index.index:index_driver_init',\n 's3aio_index = datacube.drivers.s3aio_index:index_driver_init',\n ],\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport versioneer\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'compliance-checker',\n 'hypothesis',\n 'mock',\n 'objgraph',\n 'pycodestyle',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-timeout',\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'interactive': ['matplotlib', 'fiona'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'analytics': ['scipy', 'pyparsing', 'numexpr'],\n 'doc': ['Sphinx', 'setuptools'],\n 'replicas': ['paramiko', 'sshtunnel', 'tqdm'],\n 'celery': ['celery>=4', 'redis'],\n 's3': ['boto3==1.4.3', 'SharedArray', 'pathos', 'zstandard'],\n 'test': tests_require,\n}\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='datacube',\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n python_requires='>3.5.2',\n\n url='https://github.com/opendatacube/datacube-core',\n author='AGDC Collaboration',\n maintainer='AGDC Collaboration',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n },\n scripts=[\n 'datacube_apps/scripts/pbs_helpers.sh'\n ],\n setup_requires=[\n 'pytest-runner'\n ],\n install_requires=[\n 'affine',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'gdal>=1.9',\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n 'psycopg2',\n 'pypeg2',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=0.9a10', # required for zip reading, 0.9 gets around 1.0a ordering problems\n 'singledispatch',\n 'sqlalchemy',\n 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube-stacker = datacube_apps.stacker:main',\n 'datacube-worker = datacube.execution.worker:main',\n 'datacube-fixer = datacube_apps.stacker:fixer_main',\n 'datacube-ncml = datacube_apps.ncml:ncml_app',\n 'pixeldrill = datacube_apps.pixeldrill:main [interactive]',\n 'movie_generator = datacube_apps.movie_generator:main',\n 'datacube-simple-replica = datacube_apps.simple_replica:replicate [replicas]'\n ],\n 'datacube.plugins.io.read': [\n 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',\n 's3aio = datacube.drivers.s3.driver:reader_driver_init',\n 's3aio_test = datacube.drivers.s3.driver:reader_test_driver_init'\n ],\n 'datacube.plugins.io.write': [\n 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',\n 's3aio = datacube.drivers.s3.driver:writer_driver_init',\n 's3aio_test = datacube.drivers.s3.driver:writer_test_driver_init',\n ],\n 'datacube.plugins.index': [\n 'default = datacube.index.index:index_driver_init',\n 's3aio_index = datacube.drivers.s3aio_index:index_driver_init',\n ],\n },\n)\n", "path": "setup.py"}]}
1,713
268
gh_patches_debug_32556
rasdani/github-patches
git_diff
streamlink__streamlink-121
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Plugin LivecodingTV fails to load on Python 3.6.0b2 on Windows 10 x64 Just running streamlink raises the following error on my system with a fresh install: ``` C:\WINDOWS\system32>streamlink Failed to load plugin livecodingtv: File "c:\program files\python36\lib\imp.py", line 234, in load_module return load_source(name, filename, file) File "c:\program files\python36\lib\imp.py", line 172, in load_source module = _load(spec) File "<frozen importlib._bootstrap>", line 675, in _load File "<frozen importlib._bootstrap>", line 655, in _load_unlocked File "<frozen importlib._bootstrap_external>", line 677, in exec_module File "<frozen importlib._bootstrap>", line 205, in _call_with_frames_removed File "c:\program files\python36\lib\site-packages\streamlink\plugins\livecodingtv.py", line 9, in <module> _url_re = re.compile('http(s)?://(?:\w+.)?\livecoding\.tv') File "c:\program files\python36\lib\re.py", line 233, in compile return _compile(pattern, flags) File "c:\program files\python36\lib\re.py", line 301, in _compile p = sre_compile.compile(pattern, flags) File "c:\program files\python36\lib\sre_compile.py", line 562, in compile p = sre_parse.parse(p, flags) File "c:\program files\python36\lib\sre_parse.py", line 856, in parse p = _parse_sub(source, pattern, flags & SRE_FLAG_VERBOSE, False) File "c:\program files\python36\lib\sre_parse.py", line 415, in _parse_sub itemsappend(_parse(source, state, verbose)) File "c:\program files\python36\lib\sre_parse.py", line 501, in _parse code = _escape(source, this, state) File "c:\program files\python36\lib\sre_parse.py", line 401, in _escape raise source.error("bad escape %s" % escape, len(escape)) sre_constants.error: bad escape \l at position 20 usage: streamlink [OPTIONS] [URL] [STREAM] Use -h/--help to see the available options or read the manual at http://docs.streamlink.io/ C:\WINDOWS\system32>python --version Python 3.6.0b2 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/streamlink/plugins/livecodingtv.py` Content: ``` 1 import re 2 from streamlink.plugin import Plugin 3 from streamlink.stream import RTMPStream, HTTPStream 4 from streamlink.plugin.api import http 5 6 7 _vod_re = re.compile('\"(http(s)?://.*\.mp4\?t=.*)\"') 8 _rtmp_re = re.compile('rtmp://[^"]+/(?P<channel>\w+)+[^/"]+') 9 _url_re = re.compile('http(s)?://(?:\w+.)?\livecoding\.tv') 10 11 12 class LivecodingTV(Plugin): 13 @classmethod 14 def can_handle_url(cls, url): 15 return _url_re.match(url) 16 17 def _get_streams(self): 18 res = http.get(self.url) 19 match = _rtmp_re.search(res.content.decode('utf-8')) 20 if match: 21 params = { 22 "rtmp": match.group(0), 23 "pageUrl": self.url, 24 "live": True, 25 } 26 yield 'live', RTMPStream(self.session, params) 27 return 28 29 match = _vod_re.search(res.content.decode('utf-8')) 30 if match: 31 yield 'vod', HTTPStream(self.session, match.group(1)) 32 33 __plugin__ = LivecodingTV 34 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/streamlink/plugins/livecodingtv.py b/src/streamlink/plugins/livecodingtv.py --- a/src/streamlink/plugins/livecodingtv.py +++ b/src/streamlink/plugins/livecodingtv.py @@ -1,12 +1,20 @@ import re from streamlink.plugin import Plugin +from streamlink.stream import HLSStream from streamlink.stream import RTMPStream, HTTPStream from streamlink.plugin.api import http -_vod_re = re.compile('\"(http(s)?://.*\.mp4\?t=.*)\"') -_rtmp_re = re.compile('rtmp://[^"]+/(?P<channel>\w+)+[^/"]+') -_url_re = re.compile('http(s)?://(?:\w+.)?\livecoding\.tv') +_streams_re = re.compile(r""" + src:\s+"( + rtmp://.*?\?t=.*?| # RTMP stream + https?://.*?playlist.m3u8.*?\?t=.*?| # HLS stream + https?://.*?manifest.mpd.*?\?t=.*?| # DASH stream + https?://.*?.mp4\?t=.*? # HTTP stream + )".*? + type:\s+"(.*?)" # which stream type it is + """, re.M | re.DOTALL | re.VERBOSE) +_url_re = re.compile(r"http(s)?://(?:\w+\.)?livecoding\.tv") class LivecodingTV(Plugin): @@ -16,18 +24,19 @@ def _get_streams(self): res = http.get(self.url) - match = _rtmp_re.search(res.content.decode('utf-8')) - if match: - params = { - "rtmp": match.group(0), - "pageUrl": self.url, - "live": True, - } - yield 'live', RTMPStream(self.session, params) - return - - match = _vod_re.search(res.content.decode('utf-8')) - if match: - yield 'vod', HTTPStream(self.session, match.group(1)) + match = _streams_re.findall(res.content.decode('utf-8')) + for url, stream_type in match: + if stream_type == "rtmp/mp4" and RTMPStream.is_usable(self.session): + params = { + "rtmp": url, + "pageUrl": self.url, + "live": True, + } + yield 'live', RTMPStream(self.session, params) + elif stream_type == "application/x-mpegURL": + for s in HLSStream.parse_variant_playlist(self.session, url).items(): + yield s + elif stream_type == "video/mp4": + yield 'vod', HTTPStream(self.session, url) __plugin__ = LivecodingTV
{"golden_diff": "diff --git a/src/streamlink/plugins/livecodingtv.py b/src/streamlink/plugins/livecodingtv.py\n--- a/src/streamlink/plugins/livecodingtv.py\n+++ b/src/streamlink/plugins/livecodingtv.py\n@@ -1,12 +1,20 @@\n import re\n from streamlink.plugin import Plugin\n+from streamlink.stream import HLSStream\n from streamlink.stream import RTMPStream, HTTPStream\n from streamlink.plugin.api import http\n \n \n-_vod_re = re.compile('\\\"(http(s)?://.*\\.mp4\\?t=.*)\\\"')\n-_rtmp_re = re.compile('rtmp://[^\"]+/(?P<channel>\\w+)+[^/\"]+')\n-_url_re = re.compile('http(s)?://(?:\\w+.)?\\livecoding\\.tv')\n+_streams_re = re.compile(r\"\"\"\n+ src:\\s+\"(\n+ rtmp://.*?\\?t=.*?| # RTMP stream\n+ https?://.*?playlist.m3u8.*?\\?t=.*?| # HLS stream\n+ https?://.*?manifest.mpd.*?\\?t=.*?| # DASH stream\n+ https?://.*?.mp4\\?t=.*? # HTTP stream\n+ )\".*?\n+ type:\\s+\"(.*?)\" # which stream type it is\n+ \"\"\", re.M | re.DOTALL | re.VERBOSE)\n+_url_re = re.compile(r\"http(s)?://(?:\\w+\\.)?livecoding\\.tv\")\n \n \n class LivecodingTV(Plugin):\n@@ -16,18 +24,19 @@\n \n def _get_streams(self):\n res = http.get(self.url)\n- match = _rtmp_re.search(res.content.decode('utf-8'))\n- if match:\n- params = {\n- \"rtmp\": match.group(0),\n- \"pageUrl\": self.url,\n- \"live\": True,\n- }\n- yield 'live', RTMPStream(self.session, params)\n- return\n-\n- match = _vod_re.search(res.content.decode('utf-8'))\n- if match:\n- yield 'vod', HTTPStream(self.session, match.group(1))\n+ match = _streams_re.findall(res.content.decode('utf-8'))\n+ for url, stream_type in match:\n+ if stream_type == \"rtmp/mp4\" and RTMPStream.is_usable(self.session):\n+ params = {\n+ \"rtmp\": url,\n+ \"pageUrl\": self.url,\n+ \"live\": True,\n+ }\n+ yield 'live', RTMPStream(self.session, params)\n+ elif stream_type == \"application/x-mpegURL\":\n+ for s in HLSStream.parse_variant_playlist(self.session, url).items():\n+ yield s\n+ elif stream_type == \"video/mp4\":\n+ yield 'vod', HTTPStream(self.session, url)\n \n __plugin__ = LivecodingTV\n", "issue": "Plugin LivecodingTV fails to load on Python 3.6.0b2 on Windows 10 x64\nJust running streamlink raises the following error on my system with a fresh install:\n\n```\nC:\\WINDOWS\\system32>streamlink\nFailed to load plugin livecodingtv:\n File \"c:\\program files\\python36\\lib\\imp.py\", line 234, in load_module\n return load_source(name, filename, file)\n File \"c:\\program files\\python36\\lib\\imp.py\", line 172, in load_source\n module = _load(spec)\n File \"<frozen importlib._bootstrap>\", line 675, in _load\n File \"<frozen importlib._bootstrap>\", line 655, in _load_unlocked\n File \"<frozen importlib._bootstrap_external>\", line 677, in exec_module\n File \"<frozen importlib._bootstrap>\", line 205, in _call_with_frames_removed\n File \"c:\\program files\\python36\\lib\\site-packages\\streamlink\\plugins\\livecodingtv.py\", line 9, in <module>\n _url_re = re.compile('http(s)?://(?:\\w+.)?\\livecoding\\.tv')\n File \"c:\\program files\\python36\\lib\\re.py\", line 233, in compile\n return _compile(pattern, flags)\n File \"c:\\program files\\python36\\lib\\re.py\", line 301, in _compile\n p = sre_compile.compile(pattern, flags)\n File \"c:\\program files\\python36\\lib\\sre_compile.py\", line 562, in compile\n p = sre_parse.parse(p, flags)\n File \"c:\\program files\\python36\\lib\\sre_parse.py\", line 856, in parse\n p = _parse_sub(source, pattern, flags & SRE_FLAG_VERBOSE, False)\n File \"c:\\program files\\python36\\lib\\sre_parse.py\", line 415, in _parse_sub\n itemsappend(_parse(source, state, verbose))\n File \"c:\\program files\\python36\\lib\\sre_parse.py\", line 501, in _parse\n code = _escape(source, this, state)\n File \"c:\\program files\\python36\\lib\\sre_parse.py\", line 401, in _escape\n raise source.error(\"bad escape %s\" % escape, len(escape))\nsre_constants.error: bad escape \\l at position 20\n\nusage: streamlink [OPTIONS] [URL] [STREAM]\n\nUse -h/--help to see the available options or read the manual at http://docs.streamlink.io/\n\nC:\\WINDOWS\\system32>python --version\nPython 3.6.0b2\n```\n\n", "before_files": [{"content": "import re\nfrom streamlink.plugin import Plugin\nfrom streamlink.stream import RTMPStream, HTTPStream\nfrom streamlink.plugin.api import http\n\n\n_vod_re = re.compile('\\\"(http(s)?://.*\\.mp4\\?t=.*)\\\"')\n_rtmp_re = re.compile('rtmp://[^\"]+/(?P<channel>\\w+)+[^/\"]+')\n_url_re = re.compile('http(s)?://(?:\\w+.)?\\livecoding\\.tv')\n\n\nclass LivecodingTV(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n def _get_streams(self):\n res = http.get(self.url)\n match = _rtmp_re.search(res.content.decode('utf-8'))\n if match:\n params = {\n \"rtmp\": match.group(0),\n \"pageUrl\": self.url,\n \"live\": True,\n }\n yield 'live', RTMPStream(self.session, params)\n return\n\n match = _vod_re.search(res.content.decode('utf-8'))\n if match:\n yield 'vod', HTTPStream(self.session, match.group(1))\n\n__plugin__ = LivecodingTV\n", "path": "src/streamlink/plugins/livecodingtv.py"}], "after_files": [{"content": "import re\nfrom streamlink.plugin import Plugin\nfrom streamlink.stream import HLSStream\nfrom streamlink.stream import RTMPStream, HTTPStream\nfrom streamlink.plugin.api import http\n\n\n_streams_re = re.compile(r\"\"\"\n src:\\s+\"(\n rtmp://.*?\\?t=.*?| # RTMP stream\n https?://.*?playlist.m3u8.*?\\?t=.*?| # HLS stream\n https?://.*?manifest.mpd.*?\\?t=.*?| # DASH stream\n https?://.*?.mp4\\?t=.*? # HTTP stream\n )\".*?\n type:\\s+\"(.*?)\" # which stream type it is\n \"\"\", re.M | re.DOTALL | re.VERBOSE)\n_url_re = re.compile(r\"http(s)?://(?:\\w+\\.)?livecoding\\.tv\")\n\n\nclass LivecodingTV(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n def _get_streams(self):\n res = http.get(self.url)\n match = _streams_re.findall(res.content.decode('utf-8'))\n for url, stream_type in match:\n if stream_type == \"rtmp/mp4\" and RTMPStream.is_usable(self.session):\n params = {\n \"rtmp\": url,\n \"pageUrl\": self.url,\n \"live\": True,\n }\n yield 'live', RTMPStream(self.session, params)\n elif stream_type == \"application/x-mpegURL\":\n for s in HLSStream.parse_variant_playlist(self.session, url).items():\n yield s\n elif stream_type == \"video/mp4\":\n yield 'vod', HTTPStream(self.session, url)\n\n__plugin__ = LivecodingTV\n", "path": "src/streamlink/plugins/livecodingtv.py"}]}
1,212
647