problem_id (string, 18-22 chars) | source (string, 1 value) | task_type (string, 1 value) | in_source_id (string, 13-58 chars) | prompt (string, 1.71k-18.9k chars) | golden_diff (string, 145-5.13k chars) | verification_info (string, 465-23.6k chars) | num_tokens_prompt (int64, 556-4.1k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
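Each row below pairs a GitHub issue and partial code base (`prompt`) with the reference fix (`golden_diff`) and the material needed to verify it (`verification_info`). A minimal sketch of loading and inspecting a row with the Hugging Face `datasets` library follows; the dataset id is taken from the `source` column, and both its availability on the Hub and the `train` split name are assumptions rather than facts stated on this page.

```python
from datasets import load_dataset

# Dataset id taken from the `source` column below; split name is assumed.
ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["in_source_id"])  # e.g. gh_patches_debug_19570, pretalx__pretalx-212
print(row["prompt"][:400])       # issue statement plus partial code base
print(row["golden_diff"][:400])  # reference fix as a unified diff
```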
gh_patches_debug_19570
|
rasdani/github-patches
|
git_diff
|
pretalx__pretalx-212
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect invite token yields server exception
When using an incorrect invitation token, pretalx does not fail gracefully.
Incorrect invite token yields server exception
When using an incorrect invitation token, pretalx does not fail gracefully.
</issue>
<code>
[start of src/pretalx/cfp/views/user.py]
1 import urllib
2
3 from csp.decorators import csp_update
4 from django.contrib import messages
5 from django.core.exceptions import ValidationError
6 from django.core.validators import validate_email
7 from django.http import Http404
8 from django.shortcuts import redirect
9 from django.utils.decorators import method_decorator
10 from django.utils.functional import cached_property
11 from django.views.generic import (
12 DetailView, FormView, ListView, TemplateView, UpdateView, View,
13 )
14
15 from pretalx.cfp.forms.submissions import (
16 InfoForm, QuestionsForm, SubmissionInvitationForm,
17 )
18 from pretalx.cfp.views.event import LoggedInEventPageMixin
19 from pretalx.common.phrases import phrases
20 from pretalx.person.forms import LoginInfoForm, SpeakerProfileForm
21 from pretalx.submission.models import Submission, SubmissionStates
22
23
24 @method_decorator(csp_update(STYLE_SRC="'self' 'unsafe-inline'", IMG_SRC="https://www.gravatar.com"), name='dispatch')
25 class ProfileView(LoggedInEventPageMixin, TemplateView):
26 template_name = 'cfp/event/user_profile.html'
27
28 @cached_property
29 def login_form(self):
30 return LoginInfoForm(user=self.request.user,
31 data=(self.request.POST
32 if self.request.method == 'POST'
33 and self.request.POST.get('form') == 'login'
34 else None))
35
36 @cached_property
37 def profile_form(self):
38 if self.request.method == 'POST' and self.request.POST.get('form') == 'profile':
39 return SpeakerProfileForm(
40 user=self.request.user,
41 event=self.request.event,
42 read_only=False,
43 data=self.request.POST,
44 files=self.request.FILES,
45 )
46 return SpeakerProfileForm(
47 user=self.request.user,
48 event=self.request.event,
49 read_only=False,
50 data=None,
51 )
52
53 @cached_property
54 def questions_form(self):
55 return QuestionsForm(
56 data=self.request.POST if self.request.method == 'POST' else None,
57 speaker=self.request.user,
58 event=self.request.event,
59 target='speaker',
60 request_user=self.request.user,
61 )
62
63 def get_context_data(self, event):
64 ctx = super().get_context_data()
65 ctx['login_form'] = self.login_form
66 ctx['profile_form'] = self.profile_form
67 ctx['questions_form'] = self.questions_form
68 ctx['questions_exist'] = self.request.event.questions.filter(active=True, target='speaker').exists()
69 return ctx
70
71 def post(self, request, *args, **kwargs):
72 if self.login_form.is_bound:
73 if self.login_form.is_valid():
74 self.login_form.save()
75 messages.success(self.request, phrases.base.saved)
76 request.user.log_action('pretalx.user.password.update')
77 return redirect('cfp:event.user.view', event=self.request.event.slug)
78 elif self.profile_form.is_bound:
79 if self.profile_form.is_valid():
80 self.profile_form.save()
81 messages.success(self.request, phrases.base.saved)
82 profile = self.request.user.profiles.get_or_create(event=self.request.event)[0]
83 profile.log_action('pretalx.user.profile.update', person=request.user)
84 return redirect('cfp:event.user.view', event=self.request.event.slug)
85 elif self.questions_form.is_bound:
86 if self.questions_form.is_valid():
87 self.questions_form.save()
88 messages.success(self.request, phrases.base.saved)
89 return redirect('cfp:event.user.view', event=self.request.event.slug)
90
91 messages.error(self.request, phrases.base.error_saving_changes)
92 return super().get(request, *args, **kwargs)
93
94
95 class SubmissionViewMixin:
96 def get_object(self):
97 try:
98 return self.request.event.submissions.prefetch_related('answers', 'answers__options').get(
99 speakers__in=[self.request.user],
100 code__iexact=self.kwargs.get('code')
101 )
102 except Submission.DoesNotExist:
103 try:
104 # Backwards compatibility
105 return self.request.event.submissions.prefetch_related('answers', 'answers__options').get(
106 speakers__in=[self.request.user],
107 id=self.kwargs.get('code')
108 )
109 except (Submission.DoesNotExist, ValueError):
110 raise Http404()
111
112
113 class SubmissionsListView(LoggedInEventPageMixin, ListView):
114 template_name = 'cfp/event/user_submissions.html'
115 context_object_name = 'submissions'
116
117 def get_queryset(self):
118 return self.request.event.submissions.filter(speakers__in=[self.request.user])
119
120
121 class SubmissionsWithdrawView(LoggedInEventPageMixin, SubmissionViewMixin, DetailView):
122 template_name = 'cfp/event/user_submission_withdraw.html'
123 model = Submission
124 context_object_name = 'submission'
125
126 def dispatch(self, request, *args, **kwargs):
127 self.object = self.get_object()
128 if self.object.state == SubmissionStates.SUBMITTED:
129 self.object.state = SubmissionStates.WITHDRAWN
130 self.object.save(update_fields=['state'])
131 self.object.log_action('pretalx.submission.withdrawal', person=request.user)
132 messages.success(self.request, phrases.cfp.submission_withdrawn)
133 else:
134 messages.error(self.request, phrases.cfp.submission_not_withdrawn)
135 return redirect('cfp:event.user.submissions', event=self.request.event.slug)
136
137
138 class SubmissionConfirmView(LoggedInEventPageMixin, SubmissionViewMixin, View):
139
140 def dispatch(self, request, *args, **kwargs):
141 if request.user.is_anonymous:
142 return redirect(request.event.urls.login)
143 submission = self.get_object()
144 if submission.state == SubmissionStates.ACCEPTED:
145 submission.confirm(person=request.user)
146 submission.log_action('pretalx.submission.confirm', person=request.user)
147 messages.success(self.request, phrases.cfp.submission_confirmed)
148 elif submission.state == SubmissionStates.CONFIRMED:
149 messages.success(self.request, phrases.cfp.submission_was_confirmed)
150 else:
151 messages.error(self.request, phrases.cfp.submission_not_confirmed)
152 return redirect('cfp:event.user.submissions', event=self.request.event.slug)
153
154
155 class SubmissionsEditView(LoggedInEventPageMixin, SubmissionViewMixin, UpdateView):
156 template_name = 'cfp/event/user_submission_edit.html'
157 model = Submission
158 form_class = InfoForm
159 context_object_name = 'submission'
160
161 def get_context_data(self, **kwargs):
162 ctx = super().get_context_data(**kwargs)
163 ctx['qform'] = self.qform
164 ctx['can_edit'] = self.can_edit
165 return ctx
166
167 @cached_property
168 def qform(self):
169 return QuestionsForm(
170 data=self.request.POST if self.request.method == 'POST' else None,
171 submission=self.object,
172 event=self.request.event,
173 readonly=not self.can_edit,
174 )
175
176 def post(self, request, *args, **kwargs):
177 self.object = self.get_object()
178 form = self.get_form()
179 if form.is_valid() and self.qform.is_valid():
180 return self.form_valid(form)
181 else:
182 return self.form_invalid(form)
183
184 @property
185 def can_edit(self):
186 return self.object.editable
187
188 def get_form_kwargs(self):
189 kwargs = super().get_form_kwargs()
190 kwargs['event'] = self.request.event
191 kwargs['readonly'] = not self.can_edit
192 return kwargs
193
194 def form_valid(self, form):
195 if self.can_edit:
196 form.save()
197 self.qform.save()
198 if form.has_changed():
199 form.instance.log_action('pretalx.submission.update', person=self.request.user)
200 messages.success(self.request, phrases.base.saved)
201 else:
202 messages.error(self.request, phrases.cfp.submission_uneditable)
203 return redirect('cfp:event.user.submissions', event=self.request.event.slug)
204
205
206 class DeleteAccountView(LoggedInEventPageMixin, View):
207
208 def post(self, request, event):
209
210 if request.POST.get('really'):
211 from django.contrib.auth import logout
212 request.user.deactivate()
213 logout(request)
214 messages.success(request, phrases.cfp.account_deleted)
215 return redirect(request.event.urls.base)
216 else:
217 messages.error(request, phrases.cfp.account_delete_confirm)
218 return redirect(request.event.urls.user + '?really')
219
220
221 class SubmissionInviteView(LoggedInEventPageMixin, SubmissionViewMixin, FormView):
222 form_class = SubmissionInvitationForm
223 template_name = 'cfp/event/user_submission_invitation.html'
224
225 def get_form_kwargs(self, *args, **kwargs):
226 kwargs = super().get_form_kwargs(*args, **kwargs)
227 kwargs['submission'] = self.get_object()
228 kwargs['speaker'] = self.request.user
229 if 'email' in self.request.GET and not self.request.method == 'POST':
230 initial = kwargs.get('initial', {})
231 initial['speaker'] = urllib.parse.unquote(self.request.GET['email'])
232 kwargs['initial'] = initial
233
234 try:
235 validate_email(initial['speaker'])
236 except ValidationError:
237 messages.warning(self.request, phrases.cfp.invite_invalid_email)
238 return kwargs
239
240 def get_context_data(self, *args, **kwargs):
241 ctx = super().get_context_data(*args, **kwargs)
242 ctx['submission'] = self.get_object()
243 ctx['invite_url'] = ctx['submission'].urls.accept_invitation.full(scheme='https')
244 return ctx
245
246 def form_valid(self, form):
247 form.save()
248 messages.success(self.request, phrases.cfp.invite_sent)
249 self.get_object().log_action('pretalx.submission.speakers.invite', person=self.request.user)
250 return super().form_valid(form)
251
252 def get_success_url(self):
253 return self.get_object().urls.user_base
254
255
256 class SubmissionInviteAcceptView(LoggedInEventPageMixin, DetailView):
257 template_name = 'cfp/event/invitation.html'
258 context_object_name = 'submission'
259
260 def get_object(self, *args, **kwargs):
261 return Submission.objects.get(
262 code__iexact=self.kwargs['code'],
263 invitation_token__iexact=self.kwargs['invitation'],
264 )
265
266 def post(self, *args, **kwargs):
267 submission = self.get_object()
268 submission.speakers.add(self.request.user)
269 submission.log_action('pretalx.submission.speakers.add', person=self.request.user)
270 submission.save()
271 messages.success(self.request, phrases.cfp.invite_accepted)
272 return redirect('cfp:event.user.view', event=self.request.event.slug)
273
[end of src/pretalx/cfp/views/user.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/pretalx/cfp/views/user.py b/src/pretalx/cfp/views/user.py
--- a/src/pretalx/cfp/views/user.py
+++ b/src/pretalx/cfp/views/user.py
@@ -5,7 +5,7 @@
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.http import Http404
-from django.shortcuts import redirect
+from django.shortcuts import get_object_or_404, redirect
from django.utils.decorators import method_decorator
from django.utils.functional import cached_property
from django.views.generic import (
@@ -258,7 +258,8 @@
context_object_name = 'submission'
def get_object(self, *args, **kwargs):
- return Submission.objects.get(
+ return get_object_or_404(
+ Submission,
code__iexact=self.kwargs['code'],
invitation_token__iexact=self.kwargs['invitation'],
)
|
{"golden_diff": "diff --git a/src/pretalx/cfp/views/user.py b/src/pretalx/cfp/views/user.py\n--- a/src/pretalx/cfp/views/user.py\n+++ b/src/pretalx/cfp/views/user.py\n@@ -5,7 +5,7 @@\n from django.core.exceptions import ValidationError\n from django.core.validators import validate_email\n from django.http import Http404\n-from django.shortcuts import redirect\n+from django.shortcuts import get_object_or_404, redirect\n from django.utils.decorators import method_decorator\n from django.utils.functional import cached_property\n from django.views.generic import (\n@@ -258,7 +258,8 @@\n context_object_name = 'submission'\n \n def get_object(self, *args, **kwargs):\n- return Submission.objects.get(\n+ return get_object_or_404(\n+ Submission,\n code__iexact=self.kwargs['code'],\n invitation_token__iexact=self.kwargs['invitation'],\n )\n", "issue": "Incorrect invite token yields server exception\nWhen using an incorrect invitation token, pretalx does not fail gracefully.\nIncorrect invite token yields server exception\nWhen using an incorrect invitation token, pretalx does not fail gracefully.\n", "before_files": [{"content": "import urllib\n\nfrom csp.decorators import csp_update\nfrom django.contrib import messages\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import validate_email\nfrom django.http import Http404\nfrom django.shortcuts import redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.functional import cached_property\nfrom django.views.generic import (\n DetailView, FormView, ListView, TemplateView, UpdateView, View,\n)\n\nfrom pretalx.cfp.forms.submissions import (\n InfoForm, QuestionsForm, SubmissionInvitationForm,\n)\nfrom pretalx.cfp.views.event import LoggedInEventPageMixin\nfrom pretalx.common.phrases import phrases\nfrom pretalx.person.forms import LoginInfoForm, SpeakerProfileForm\nfrom pretalx.submission.models import Submission, SubmissionStates\n\n\n@method_decorator(csp_update(STYLE_SRC=\"'self' 'unsafe-inline'\", IMG_SRC=\"https://www.gravatar.com\"), name='dispatch')\nclass ProfileView(LoggedInEventPageMixin, TemplateView):\n template_name = 'cfp/event/user_profile.html'\n\n @cached_property\n def login_form(self):\n return LoginInfoForm(user=self.request.user,\n data=(self.request.POST\n if self.request.method == 'POST'\n and self.request.POST.get('form') == 'login'\n else None))\n\n @cached_property\n def profile_form(self):\n if self.request.method == 'POST' and self.request.POST.get('form') == 'profile':\n return SpeakerProfileForm(\n user=self.request.user,\n event=self.request.event,\n read_only=False,\n data=self.request.POST,\n files=self.request.FILES,\n )\n return SpeakerProfileForm(\n user=self.request.user,\n event=self.request.event,\n read_only=False,\n data=None,\n )\n\n @cached_property\n def questions_form(self):\n return QuestionsForm(\n data=self.request.POST if self.request.method == 'POST' else None,\n speaker=self.request.user,\n event=self.request.event,\n target='speaker',\n request_user=self.request.user,\n )\n\n def get_context_data(self, event):\n ctx = super().get_context_data()\n ctx['login_form'] = self.login_form\n ctx['profile_form'] = self.profile_form\n ctx['questions_form'] = self.questions_form\n ctx['questions_exist'] = self.request.event.questions.filter(active=True, target='speaker').exists()\n return ctx\n\n def post(self, request, *args, **kwargs):\n if self.login_form.is_bound:\n if self.login_form.is_valid():\n self.login_form.save()\n 
messages.success(self.request, phrases.base.saved)\n request.user.log_action('pretalx.user.password.update')\n return redirect('cfp:event.user.view', event=self.request.event.slug)\n elif self.profile_form.is_bound:\n if self.profile_form.is_valid():\n self.profile_form.save()\n messages.success(self.request, phrases.base.saved)\n profile = self.request.user.profiles.get_or_create(event=self.request.event)[0]\n profile.log_action('pretalx.user.profile.update', person=request.user)\n return redirect('cfp:event.user.view', event=self.request.event.slug)\n elif self.questions_form.is_bound:\n if self.questions_form.is_valid():\n self.questions_form.save()\n messages.success(self.request, phrases.base.saved)\n return redirect('cfp:event.user.view', event=self.request.event.slug)\n\n messages.error(self.request, phrases.base.error_saving_changes)\n return super().get(request, *args, **kwargs)\n\n\nclass SubmissionViewMixin:\n def get_object(self):\n try:\n return self.request.event.submissions.prefetch_related('answers', 'answers__options').get(\n speakers__in=[self.request.user],\n code__iexact=self.kwargs.get('code')\n )\n except Submission.DoesNotExist:\n try:\n # Backwards compatibility\n return self.request.event.submissions.prefetch_related('answers', 'answers__options').get(\n speakers__in=[self.request.user],\n id=self.kwargs.get('code')\n )\n except (Submission.DoesNotExist, ValueError):\n raise Http404()\n\n\nclass SubmissionsListView(LoggedInEventPageMixin, ListView):\n template_name = 'cfp/event/user_submissions.html'\n context_object_name = 'submissions'\n\n def get_queryset(self):\n return self.request.event.submissions.filter(speakers__in=[self.request.user])\n\n\nclass SubmissionsWithdrawView(LoggedInEventPageMixin, SubmissionViewMixin, DetailView):\n template_name = 'cfp/event/user_submission_withdraw.html'\n model = Submission\n context_object_name = 'submission'\n\n def dispatch(self, request, *args, **kwargs):\n self.object = self.get_object()\n if self.object.state == SubmissionStates.SUBMITTED:\n self.object.state = SubmissionStates.WITHDRAWN\n self.object.save(update_fields=['state'])\n self.object.log_action('pretalx.submission.withdrawal', person=request.user)\n messages.success(self.request, phrases.cfp.submission_withdrawn)\n else:\n messages.error(self.request, phrases.cfp.submission_not_withdrawn)\n return redirect('cfp:event.user.submissions', event=self.request.event.slug)\n\n\nclass SubmissionConfirmView(LoggedInEventPageMixin, SubmissionViewMixin, View):\n\n def dispatch(self, request, *args, **kwargs):\n if request.user.is_anonymous:\n return redirect(request.event.urls.login)\n submission = self.get_object()\n if submission.state == SubmissionStates.ACCEPTED:\n submission.confirm(person=request.user)\n submission.log_action('pretalx.submission.confirm', person=request.user)\n messages.success(self.request, phrases.cfp.submission_confirmed)\n elif submission.state == SubmissionStates.CONFIRMED:\n messages.success(self.request, phrases.cfp.submission_was_confirmed)\n else:\n messages.error(self.request, phrases.cfp.submission_not_confirmed)\n return redirect('cfp:event.user.submissions', event=self.request.event.slug)\n\n\nclass SubmissionsEditView(LoggedInEventPageMixin, SubmissionViewMixin, UpdateView):\n template_name = 'cfp/event/user_submission_edit.html'\n model = Submission\n form_class = InfoForm\n context_object_name = 'submission'\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx['qform'] = self.qform\n 
ctx['can_edit'] = self.can_edit\n return ctx\n\n @cached_property\n def qform(self):\n return QuestionsForm(\n data=self.request.POST if self.request.method == 'POST' else None,\n submission=self.object,\n event=self.request.event,\n readonly=not self.can_edit,\n )\n\n def post(self, request, *args, **kwargs):\n self.object = self.get_object()\n form = self.get_form()\n if form.is_valid() and self.qform.is_valid():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)\n\n @property\n def can_edit(self):\n return self.object.editable\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['event'] = self.request.event\n kwargs['readonly'] = not self.can_edit\n return kwargs\n\n def form_valid(self, form):\n if self.can_edit:\n form.save()\n self.qform.save()\n if form.has_changed():\n form.instance.log_action('pretalx.submission.update', person=self.request.user)\n messages.success(self.request, phrases.base.saved)\n else:\n messages.error(self.request, phrases.cfp.submission_uneditable)\n return redirect('cfp:event.user.submissions', event=self.request.event.slug)\n\n\nclass DeleteAccountView(LoggedInEventPageMixin, View):\n\n def post(self, request, event):\n\n if request.POST.get('really'):\n from django.contrib.auth import logout\n request.user.deactivate()\n logout(request)\n messages.success(request, phrases.cfp.account_deleted)\n return redirect(request.event.urls.base)\n else:\n messages.error(request, phrases.cfp.account_delete_confirm)\n return redirect(request.event.urls.user + '?really')\n\n\nclass SubmissionInviteView(LoggedInEventPageMixin, SubmissionViewMixin, FormView):\n form_class = SubmissionInvitationForm\n template_name = 'cfp/event/user_submission_invitation.html'\n\n def get_form_kwargs(self, *args, **kwargs):\n kwargs = super().get_form_kwargs(*args, **kwargs)\n kwargs['submission'] = self.get_object()\n kwargs['speaker'] = self.request.user\n if 'email' in self.request.GET and not self.request.method == 'POST':\n initial = kwargs.get('initial', {})\n initial['speaker'] = urllib.parse.unquote(self.request.GET['email'])\n kwargs['initial'] = initial\n\n try:\n validate_email(initial['speaker'])\n except ValidationError:\n messages.warning(self.request, phrases.cfp.invite_invalid_email)\n return kwargs\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['submission'] = self.get_object()\n ctx['invite_url'] = ctx['submission'].urls.accept_invitation.full(scheme='https')\n return ctx\n\n def form_valid(self, form):\n form.save()\n messages.success(self.request, phrases.cfp.invite_sent)\n self.get_object().log_action('pretalx.submission.speakers.invite', person=self.request.user)\n return super().form_valid(form)\n\n def get_success_url(self):\n return self.get_object().urls.user_base\n\n\nclass SubmissionInviteAcceptView(LoggedInEventPageMixin, DetailView):\n template_name = 'cfp/event/invitation.html'\n context_object_name = 'submission'\n\n def get_object(self, *args, **kwargs):\n return Submission.objects.get(\n code__iexact=self.kwargs['code'],\n invitation_token__iexact=self.kwargs['invitation'],\n )\n\n def post(self, *args, **kwargs):\n submission = self.get_object()\n submission.speakers.add(self.request.user)\n submission.log_action('pretalx.submission.speakers.add', person=self.request.user)\n submission.save()\n messages.success(self.request, phrases.cfp.invite_accepted)\n return redirect('cfp:event.user.view', event=self.request.event.slug)\n", "path": 
"src/pretalx/cfp/views/user.py"}]}
| 3,504 | 209 |
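The `golden_diff` field of the row above is a unified diff, and the prompt explicitly asks for a patch that can be applied with `git apply`. Below is a minimal sketch of dry-running such a patch against a local checkout; the function name is illustrative, and it assumes the checkout already sits at the base commit the diff was generated from.

```python
import subprocess
import tempfile

def patch_applies(golden_diff: str, repo_dir: str) -> bool:
    """Dry-run a row's golden_diff against a local checkout with `git apply --check`."""
    with tempfile.NamedTemporaryFile("w", suffix=".patch", delete=False) as fh:
        fh.write(golden_diff)
        patch_path = fh.name
    result = subprocess.run(
        ["git", "apply", "--check", patch_path],
        cwd=repo_dir,  # assumed: repo_dir is a checkout at the matching base commit
        capture_output=True,
        text=True,
    )
    return result.returncode == 0
```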
gh_patches_debug_33108
|
rasdani/github-patches
|
git_diff
|
Parsl__parsl-2486
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RemoteExceptionWrapper should not serialise unnecessarily.
**Is your feature request related to a problem? Please describe.**
Exceptions from tasks are wrapped in parsl's `RemoteExceptionWrapper` which is intended to be a helper that can ensure exceptions can be represented as values and moved between python runtimes.
As part of this, it always serializes exception information.
Serialization is not always necessary, but always happens: at least in the case of the local thread provider, the exception object never moves between python runtimes.
In some cases, exceptions cannot be serialised/deserialised properly: for example, the globus SDK does strange things with exception initializers (#785, #1997) which interact with the strange things RemoteExceptionWrapper is doing, but usually globus calls are made on the submit side (because the globus file staging providers run tasks locally).
**Describe the solution you'd like**
I would like RemoteExceptionWrapper to only perform the serialization/deserialization loop when necessary: when the RemoteExceptionWrapper itself is serialized/deserialized, rather than always.
**Describe alternatives you've considered**
Persuade Globus to do their exceptions differently.
**Additional context**
The only practical case that I have seen this affect things is the error handling path for globus file staging, although it is a broader theoretical problem.
</issue>
<code>
[start of parsl/app/errors.py]
1 """Exceptions raised by Apps."""
2 from functools import wraps
3 from typing import Callable, List, Union, Any, TypeVar, Optional
4 from types import TracebackType
5
6 import dill
7 import logging
8 from tblib import Traceback
9
10 from six import reraise
11
12 from parsl.data_provider.files import File
13
14 logger = logging.getLogger(__name__)
15
16
17 class ParslError(Exception):
18 """Base class for all exceptions.
19
20 Only to be invoked when a more specific error is not available.
21 """
22
23
24 class NotFutureError(ParslError):
25 """A non future item was passed to a function that expected a future.
26
27 This is basically a type error.
28 """
29
30
31 class AppException(ParslError):
32 """An error raised during execution of an app.
33
34 What this exception contains depends entirely on context
35 """
36
37
38 class AppBadFormatting(ParslError):
39 """An error raised during formatting of a bash function.
40 """
41
42
43 class BashExitFailure(AppException):
44 """A non-zero exit code returned from a @bash_app
45
46 Contains:
47 app name (str)
48 exitcode (int)
49 """
50
51 def __init__(self, app_name: str, exitcode: int) -> None:
52 self.app_name = app_name
53 self.exitcode = exitcode
54
55 def __str__(self) -> str:
56 return f"bash_app {self.app_name} failed with unix exit code {self.exitcode}"
57
58
59 class AppTimeout(AppException):
60 """An error raised during execution of an app when it exceeds its allotted walltime.
61 """
62
63
64 class BashAppNoReturn(AppException):
65 """Bash app returned no string.
66
67 Contains:
68 reason(string)
69 """
70
71 def __init__(self, reason: str) -> None:
72 super().__init__(reason)
73 self.reason = reason
74
75
76 class MissingOutputs(ParslError):
77 """Error raised at the end of app execution due to missing output files.
78
79 Contains:
80 reason(string)
81 outputs(List of strings/files..)
82 """
83
84 def __init__(self, reason: str, outputs: List[Union[str, File]]) -> None:
85 super().__init__(reason, outputs)
86 self.reason = reason
87 self.outputs = outputs
88
89 def __repr__(self) -> str:
90 return "Missing Outputs: {0}, Reason:{1}".format(self.outputs, self.reason)
91
92
93 class BadStdStreamFile(ParslError):
94 """Error raised due to bad filepaths specified for STDOUT/ STDERR.
95
96 Contains:
97 reason(string)
98 exception object
99 """
100
101 def __init__(self, reason: str, exception: Exception) -> None:
102 super().__init__(reason, exception)
103 self._reason = reason
104 self._exception = exception
105
106 def __repr__(self) -> str:
107 return "Bad Stream File: {} Exception: {}".format(self._reason, self._exception)
108
109 def __str__(self) -> str:
110 return self.__repr__()
111
112
113 class RemoteExceptionWrapper:
114 def __init__(self, e_type: type, e_value: BaseException, traceback: Optional[TracebackType]) -> None:
115
116 self.e_type = dill.dumps(e_type)
117 self.e_value = dill.dumps(e_value)
118 self.e_traceback = None if traceback is None else Traceback(traceback)
119 if e_value.__cause__ is None:
120 self.cause = None
121 else:
122 cause = e_value.__cause__
123 self.cause = self.__class__(type(cause), cause, cause.__traceback__)
124
125 def reraise(self) -> None:
126
127 t = dill.loads(self.e_type)
128
129 # the type is logged here before deserialising v and tb
130 # because occasionally there are problems deserialising the
131 # value (see #785, #548) and the fix is related to the
132 # specific exception type.
133 logger.debug("Reraising exception of type {}".format(t))
134
135 v = self.get_exception()
136
137 reraise(t, v, v.__traceback__)
138
139 def get_exception(self) -> Exception:
140 v = dill.loads(self.e_value)
141 if self.cause is not None:
142 v.__cause__ = self.cause.get_exception()
143 if self.e_traceback is not None:
144 tb = self.e_traceback.as_traceback()
145 return v.with_traceback(tb)
146 else:
147 return v
148
149
150 R = TypeVar('R')
151
152 # There appears to be no solution to typing this without a mypy plugin.
153 # The reason is because wrap_error maps a Callable[[X...], R] to a Callable[[X...], Union[R, R2]].
154 # However, there is no provision in Python typing for pattern matching all possible types of
155 # callable arguments. This is because Callable[] is, in the infinite wisdom of the typing module,
156 # only used for callbacks: "There is no syntax to indicate optional or keyword arguments; such
157 # function types are rarely used as callback types.".
158 # The alternative supported by the typing module, of saying Callable[..., R] ->
159 # Callable[..., Union[R, R2]] results in no pattern matching between the first and second
160 # ellipsis.
161 # Yet another bogus solution that was here previously would simply define wrap_error as
162 # wrap_error(T) -> T, where T was a custom TypeVar. This obviously missed the fact that
163 # the returned function had its return signature modified.
164 # Ultimately, the best choice appears to be Callable[..., R] -> Callable[..., Union[R, ?Exception]],
165 # since it results in the correct type specification for the return value(s) while treating the
166 # arguments as Any.
167
168
169 def wrap_error(func: Callable[..., R]) -> Callable[..., Union[R, RemoteExceptionWrapper]]:
170 @wraps(func) # type: ignore
171 def wrapper(*args: object, **kwargs: object) -> Any:
172 import sys
173 from parsl.app.errors import RemoteExceptionWrapper
174 try:
175 return func(*args, **kwargs) # type: ignore
176 except Exception:
177 return RemoteExceptionWrapper(*sys.exc_info())
178 return wrapper # type: ignore
179
[end of parsl/app/errors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/parsl/app/errors.py b/parsl/app/errors.py
--- a/parsl/app/errors.py
+++ b/parsl/app/errors.py
@@ -2,8 +2,6 @@
from functools import wraps
from typing import Callable, List, Union, Any, TypeVar, Optional
from types import TracebackType
-
-import dill
import logging
from tblib import Traceback
@@ -113,8 +111,8 @@
class RemoteExceptionWrapper:
def __init__(self, e_type: type, e_value: BaseException, traceback: Optional[TracebackType]) -> None:
- self.e_type = dill.dumps(e_type)
- self.e_value = dill.dumps(e_value)
+ self.e_type = e_type
+ self.e_value = e_value
self.e_traceback = None if traceback is None else Traceback(traceback)
if e_value.__cause__ is None:
self.cause = None
@@ -124,20 +122,20 @@
def reraise(self) -> None:
- t = dill.loads(self.e_type)
+ t = self.e_type
# the type is logged here before deserialising v and tb
# because occasionally there are problems deserialising the
# value (see #785, #548) and the fix is related to the
# specific exception type.
- logger.debug("Reraising exception of type {}".format(t))
+ logger.debug("Reraising exception of type {}".format(self.e_type))
v = self.get_exception()
reraise(t, v, v.__traceback__)
- def get_exception(self) -> Exception:
- v = dill.loads(self.e_value)
+ def get_exception(self) -> BaseException:
+ v = self.e_value
if self.cause is not None:
v.__cause__ = self.cause.get_exception()
if self.e_traceback is not None:
|
{"golden_diff": "diff --git a/parsl/app/errors.py b/parsl/app/errors.py\n--- a/parsl/app/errors.py\n+++ b/parsl/app/errors.py\n@@ -2,8 +2,6 @@\n from functools import wraps\n from typing import Callable, List, Union, Any, TypeVar, Optional\n from types import TracebackType\n-\n-import dill\n import logging\n from tblib import Traceback\n \n@@ -113,8 +111,8 @@\n class RemoteExceptionWrapper:\n def __init__(self, e_type: type, e_value: BaseException, traceback: Optional[TracebackType]) -> None:\n \n- self.e_type = dill.dumps(e_type)\n- self.e_value = dill.dumps(e_value)\n+ self.e_type = e_type\n+ self.e_value = e_value\n self.e_traceback = None if traceback is None else Traceback(traceback)\n if e_value.__cause__ is None:\n self.cause = None\n@@ -124,20 +122,20 @@\n \n def reraise(self) -> None:\n \n- t = dill.loads(self.e_type)\n+ t = self.e_type\n \n # the type is logged here before deserialising v and tb\n # because occasionally there are problems deserialising the\n # value (see #785, #548) and the fix is related to the\n # specific exception type.\n- logger.debug(\"Reraising exception of type {}\".format(t))\n+ logger.debug(\"Reraising exception of type {}\".format(self.e_type))\n \n v = self.get_exception()\n \n reraise(t, v, v.__traceback__)\n \n- def get_exception(self) -> Exception:\n- v = dill.loads(self.e_value)\n+ def get_exception(self) -> BaseException:\n+ v = self.e_value\n if self.cause is not None:\n v.__cause__ = self.cause.get_exception()\n if self.e_traceback is not None:\n", "issue": "RemoteExceptionWrapper should not serialise unnecessarily.\n**Is your feature request related to a problem? Please describe.**\r\n\r\nExceptions from tasks are wrapped in parsl's `RemoteExceptionWrapper` which is intended to be a helper that can ensure exceptions can be represented as values and moved between python runtimes.\r\n\r\nAs part of this, it always serializes exception information.\r\n\r\nSerialization is not always necessary, but always happens: at least in the case of the local thread provider, the exception object never moves between python runtimes.\r\n\r\nIn some cases, exceptions cannot be serialised/deserialised properly: for example, the globus SDK does strange things with exception initializers (#785, #1997) which interact with the strange things RemoteExceptionWrapper is doing, but usually globus calls are made on the submit side (because the globus file staging providers run tasks locally).\r\n\r\n**Describe the solution you'd like**\r\nI would like RemoteExceptionWrapper to only perform the serialization/deserialization loop when necessary: when the RemoteExceptionWrapper itself is serialized/deserialized, rather than always.\r\n\r\n**Describe alternatives you've considered**\r\nPersuade Globus to do their exceptions differently.\r\n\r\n**Additional context**\r\nThe only practical case that I have seen this affect things is the error handling path for globus file staging, although it is a broader theoretical problem.\n", "before_files": [{"content": "\"\"\"Exceptions raised by Apps.\"\"\"\nfrom functools import wraps\nfrom typing import Callable, List, Union, Any, TypeVar, Optional\nfrom types import TracebackType\n\nimport dill\nimport logging\nfrom tblib import Traceback\n\nfrom six import reraise\n\nfrom parsl.data_provider.files import File\n\nlogger = logging.getLogger(__name__)\n\n\nclass ParslError(Exception):\n \"\"\"Base class for all exceptions.\n\n Only to be invoked when a more specific error is not available.\n \"\"\"\n\n\nclass 
NotFutureError(ParslError):\n \"\"\"A non future item was passed to a function that expected a future.\n\n This is basically a type error.\n \"\"\"\n\n\nclass AppException(ParslError):\n \"\"\"An error raised during execution of an app.\n\n What this exception contains depends entirely on context\n \"\"\"\n\n\nclass AppBadFormatting(ParslError):\n \"\"\"An error raised during formatting of a bash function.\n \"\"\"\n\n\nclass BashExitFailure(AppException):\n \"\"\"A non-zero exit code returned from a @bash_app\n\n Contains:\n app name (str)\n exitcode (int)\n \"\"\"\n\n def __init__(self, app_name: str, exitcode: int) -> None:\n self.app_name = app_name\n self.exitcode = exitcode\n\n def __str__(self) -> str:\n return f\"bash_app {self.app_name} failed with unix exit code {self.exitcode}\"\n\n\nclass AppTimeout(AppException):\n \"\"\"An error raised during execution of an app when it exceeds its allotted walltime.\n \"\"\"\n\n\nclass BashAppNoReturn(AppException):\n \"\"\"Bash app returned no string.\n\n Contains:\n reason(string)\n \"\"\"\n\n def __init__(self, reason: str) -> None:\n super().__init__(reason)\n self.reason = reason\n\n\nclass MissingOutputs(ParslError):\n \"\"\"Error raised at the end of app execution due to missing output files.\n\n Contains:\n reason(string)\n outputs(List of strings/files..)\n \"\"\"\n\n def __init__(self, reason: str, outputs: List[Union[str, File]]) -> None:\n super().__init__(reason, outputs)\n self.reason = reason\n self.outputs = outputs\n\n def __repr__(self) -> str:\n return \"Missing Outputs: {0}, Reason:{1}\".format(self.outputs, self.reason)\n\n\nclass BadStdStreamFile(ParslError):\n \"\"\"Error raised due to bad filepaths specified for STDOUT/ STDERR.\n\n Contains:\n reason(string)\n exception object\n \"\"\"\n\n def __init__(self, reason: str, exception: Exception) -> None:\n super().__init__(reason, exception)\n self._reason = reason\n self._exception = exception\n\n def __repr__(self) -> str:\n return \"Bad Stream File: {} Exception: {}\".format(self._reason, self._exception)\n\n def __str__(self) -> str:\n return self.__repr__()\n\n\nclass RemoteExceptionWrapper:\n def __init__(self, e_type: type, e_value: BaseException, traceback: Optional[TracebackType]) -> None:\n\n self.e_type = dill.dumps(e_type)\n self.e_value = dill.dumps(e_value)\n self.e_traceback = None if traceback is None else Traceback(traceback)\n if e_value.__cause__ is None:\n self.cause = None\n else:\n cause = e_value.__cause__\n self.cause = self.__class__(type(cause), cause, cause.__traceback__)\n\n def reraise(self) -> None:\n\n t = dill.loads(self.e_type)\n\n # the type is logged here before deserialising v and tb\n # because occasionally there are problems deserialising the\n # value (see #785, #548) and the fix is related to the\n # specific exception type.\n logger.debug(\"Reraising exception of type {}\".format(t))\n\n v = self.get_exception()\n\n reraise(t, v, v.__traceback__)\n\n def get_exception(self) -> Exception:\n v = dill.loads(self.e_value)\n if self.cause is not None:\n v.__cause__ = self.cause.get_exception()\n if self.e_traceback is not None:\n tb = self.e_traceback.as_traceback()\n return v.with_traceback(tb)\n else:\n return v\n\n\nR = TypeVar('R')\n\n# There appears to be no solution to typing this without a mypy plugin.\n# The reason is because wrap_error maps a Callable[[X...], R] to a Callable[[X...], Union[R, R2]].\n# However, there is no provision in Python typing for pattern matching all possible types of\n# callable arguments. 
This is because Callable[] is, in the infinite wisdom of the typing module,\n# only used for callbacks: \"There is no syntax to indicate optional or keyword arguments; such\n# function types are rarely used as callback types.\".\n# The alternative supported by the typing module, of saying Callable[..., R] ->\n# Callable[..., Union[R, R2]] results in no pattern matching between the first and second\n# ellipsis.\n# Yet another bogus solution that was here previously would simply define wrap_error as\n# wrap_error(T) -> T, where T was a custom TypeVar. This obviously missed the fact that\n# the returned function had its return signature modified.\n# Ultimately, the best choice appears to be Callable[..., R] -> Callable[..., Union[R, ?Exception]],\n# since it results in the correct type specification for the return value(s) while treating the\n# arguments as Any.\n\n\ndef wrap_error(func: Callable[..., R]) -> Callable[..., Union[R, RemoteExceptionWrapper]]:\n @wraps(func) # type: ignore\n def wrapper(*args: object, **kwargs: object) -> Any:\n import sys\n from parsl.app.errors import RemoteExceptionWrapper\n try:\n return func(*args, **kwargs) # type: ignore\n except Exception:\n return RemoteExceptionWrapper(*sys.exc_info())\n return wrapper # type: ignore\n", "path": "parsl/app/errors.py"}]}
| 2,567 | 442 |
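The two trailing integers on each row are `num_tokens_prompt` and `num_tokens_diff` (2,567 and 442 for the row above), which makes it straightforward to select rows that fit a given context window. A short sketch follows; the 2,048-token budget is an arbitrary example, and the dataset id and split are assumed as in the loading sketch near the top.

```python
from datasets import load_dataset

MAX_PROMPT_TOKENS = 2048  # arbitrary example budget, not a property of the dataset

ds = load_dataset("rasdani/github-patches", split="train")  # id and split assumed
small = ds.filter(lambda r: r["num_tokens_prompt"] <= MAX_PROMPT_TOKENS)
print(f"{len(small)} of {len(ds)} rows fit within {MAX_PROMPT_TOKENS} prompt tokens")
```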
gh_patches_debug_10663
|
rasdani/github-patches
|
git_diff
|
shuup__shuup-2095
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Xtheme: there is no warning for usaved changes when switching between plugins
To reproduce:
1. Edit some content in Xtheme editor
2. Select another plugin without saving
3. See your changes to disappear
There probably should be warning before switching plugins when you have unsaved information.
</issue>
<code>
[start of shuup/campaigns/admin_module/forms/_basket.py]
1 # This file is part of Shuup.
2 #
3 # Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
4 #
5 # This source code is licensed under the OSL-3.0 license found in the
6 # LICENSE file in the root directory of this source tree.
7 from django import forms
8 from django.db.models import Q
9 from django.utils.translation import ugettext_lazy as _
10
11 from shuup.admin.shop_provider import get_shop
12 from shuup.admin.supplier_provider import get_supplier
13 from shuup.campaigns.models import BasketCampaign, Coupon
14
15 from ._base import BaseCampaignForm, QuickAddCouponSelect
16
17
18 class BasketCampaignForm(BaseCampaignForm):
19 class Meta(BaseCampaignForm.Meta):
20 model = BasketCampaign
21
22 def __init__(self, *args, **kwargs):
23 super(BasketCampaignForm, self).__init__(*args, **kwargs)
24
25 coupons = Coupon.objects.filter(
26 Q(active=True, shop=get_shop(self.request)),
27 Q(campaign=None) | Q(campaign=self.instance),
28 )
29 supplier = get_supplier(self.request)
30 if supplier:
31 coupons = coupons.filter(supplier=supplier)
32
33 coupon_code_choices = [('', '')] + list(coupons.values_list("pk", "code"))
34 field_kwargs = dict(choices=coupon_code_choices, required=False)
35 field_kwargs["help_text"] = _("Define the required coupon for this campaign.")
36 field_kwargs["label"] = _("Coupon")
37 field_kwargs["widget"] = QuickAddCouponSelect(editable_model="campaigns.Coupon")
38 if self.instance.pk and self.instance.coupon:
39 field_kwargs["initial"] = self.instance.coupon.pk
40
41 self.fields["coupon"] = forms.ChoiceField(**field_kwargs)
42
43 # the supplier will be, by default, the current one
44 if supplier:
45 self.fields["supplier"].widget = forms.HiddenInput()
46
47 def clean_coupon(self):
48 coupon = self.cleaned_data.get("coupon")
49 if coupon:
50 coupon = Coupon.objects.get(pk=coupon)
51 return coupon or None
52
53 def clean_supplier(self):
54 return self.cleaned_data.get("supplier") or get_supplier(self.request)
55
[end of shuup/campaigns/admin_module/forms/_basket.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/shuup/campaigns/admin_module/forms/_basket.py b/shuup/campaigns/admin_module/forms/_basket.py
--- a/shuup/campaigns/admin_module/forms/_basket.py
+++ b/shuup/campaigns/admin_module/forms/_basket.py
@@ -30,7 +30,7 @@
if supplier:
coupons = coupons.filter(supplier=supplier)
- coupon_code_choices = [('', '')] + list(coupons.values_list("pk", "code"))
+ coupon_code_choices = [('', '---------')] + list(coupons.values_list("pk", "code"))
field_kwargs = dict(choices=coupon_code_choices, required=False)
field_kwargs["help_text"] = _("Define the required coupon for this campaign.")
field_kwargs["label"] = _("Coupon")
|
{"golden_diff": "diff --git a/shuup/campaigns/admin_module/forms/_basket.py b/shuup/campaigns/admin_module/forms/_basket.py\n--- a/shuup/campaigns/admin_module/forms/_basket.py\n+++ b/shuup/campaigns/admin_module/forms/_basket.py\n@@ -30,7 +30,7 @@\n if supplier:\n coupons = coupons.filter(supplier=supplier)\n \n- coupon_code_choices = [('', '')] + list(coupons.values_list(\"pk\", \"code\"))\n+ coupon_code_choices = [('', '---------')] + list(coupons.values_list(\"pk\", \"code\"))\n field_kwargs = dict(choices=coupon_code_choices, required=False)\n field_kwargs[\"help_text\"] = _(\"Define the required coupon for this campaign.\")\n field_kwargs[\"label\"] = _(\"Coupon\")\n", "issue": "Xtheme: there is no warning for usaved changes when switching between plugins\nTo reproduce:\r\n1. Edit some content in Xtheme editor\r\n2. Select another plugin without saving\r\n3. See your changes to disappear\r\n\r\nThere probably should be warning before switching plugins when you have unsaved information.\n", "before_files": [{"content": "# This file is part of Shuup.\n#\n# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.\n#\n# This source code is licensed under the OSL-3.0 license found in the\n# LICENSE file in the root directory of this source tree.\nfrom django import forms\nfrom django.db.models import Q\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom shuup.admin.shop_provider import get_shop\nfrom shuup.admin.supplier_provider import get_supplier\nfrom shuup.campaigns.models import BasketCampaign, Coupon\n\nfrom ._base import BaseCampaignForm, QuickAddCouponSelect\n\n\nclass BasketCampaignForm(BaseCampaignForm):\n class Meta(BaseCampaignForm.Meta):\n model = BasketCampaign\n\n def __init__(self, *args, **kwargs):\n super(BasketCampaignForm, self).__init__(*args, **kwargs)\n\n coupons = Coupon.objects.filter(\n Q(active=True, shop=get_shop(self.request)),\n Q(campaign=None) | Q(campaign=self.instance),\n )\n supplier = get_supplier(self.request)\n if supplier:\n coupons = coupons.filter(supplier=supplier)\n\n coupon_code_choices = [('', '')] + list(coupons.values_list(\"pk\", \"code\"))\n field_kwargs = dict(choices=coupon_code_choices, required=False)\n field_kwargs[\"help_text\"] = _(\"Define the required coupon for this campaign.\")\n field_kwargs[\"label\"] = _(\"Coupon\")\n field_kwargs[\"widget\"] = QuickAddCouponSelect(editable_model=\"campaigns.Coupon\")\n if self.instance.pk and self.instance.coupon:\n field_kwargs[\"initial\"] = self.instance.coupon.pk\n\n self.fields[\"coupon\"] = forms.ChoiceField(**field_kwargs)\n\n # the supplier will be, by default, the current one\n if supplier:\n self.fields[\"supplier\"].widget = forms.HiddenInput()\n\n def clean_coupon(self):\n coupon = self.cleaned_data.get(\"coupon\")\n if coupon:\n coupon = Coupon.objects.get(pk=coupon)\n return coupon or None\n\n def clean_supplier(self):\n return self.cleaned_data.get(\"supplier\") or get_supplier(self.request)\n", "path": "shuup/campaigns/admin_module/forms/_basket.py"}]}
| 1,176 | 175 |
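As the rows above show, `verification_info` is a JSON string with `golden_diff`, `issue`, and `before_files` keys, where `before_files` lists the pre-patch sources as objects with `content` and `path` fields. The sketch below writes those files out so that a candidate patch can be applied against them; the helper name and output layout are illustrative, not part of the dataset.

```python
import json
from pathlib import Path

def materialize_before_files(verification_info: str, out_dir: str) -> None:
    """Write the pre-patch source files from a row's verification_info under out_dir."""
    info = json.loads(verification_info)
    for entry in info["before_files"]:
        target = Path(out_dir) / entry["path"]
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_text(entry["content"])
```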
gh_patches_debug_30949
|
rasdani/github-patches
|
git_diff
|
apache__airflow-24496
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
db+ string in result backend but not metadata secret
### Official Helm Chart version
1.1.0 (latest released)
### Apache Airflow version
2.1.3 (latest released)
### Kubernetes Version
1.21
### Helm Chart configuration
data:
metadataSecretName: "airflow-metadata"
resultBackendSecretName: "airflow-result-backend"
### Docker Image customisations
_No response_
### What happened
If we only supply 1 secret with
```
connection: postgresql://airflow:[email protected]:5432/airflow?sslmode=disable
```
To use for both metadata and resultBackendConnection then we end up with a connection error because
resultBackendConnection expects the string to be formatted like
```
connection: db+postgresql://airflow:[email protected]:5432/airflow?sslmode=disable
```
from what i can tell
### What you expected to happen
I'd expect to be able to use the same secret for both using the same format if they are using the same connection.
### How to reproduce
Make a secret structured like above to look like the metadataConnection auto-generated secret.
use that same secret for the result backend.
deploy.
### Anything else
Occurs always.
To get around currently we make 2 secrets one with just the db+ prepended.
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
</issue>
<code>
[start of airflow/config_templates/default_celery.py]
1 #
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18 """Default celery configuration."""
19 import logging
20 import ssl
21
22 from airflow.configuration import conf
23 from airflow.exceptions import AirflowConfigException, AirflowException
24
25
26 def _broker_supports_visibility_timeout(url):
27 return url.startswith("redis://") or url.startswith("sqs://")
28
29
30 log = logging.getLogger(__name__)
31
32 broker_url = conf.get('celery', 'BROKER_URL')
33
34 broker_transport_options = conf.getsection('celery_broker_transport_options') or {}
35 if 'visibility_timeout' not in broker_transport_options:
36 if _broker_supports_visibility_timeout(broker_url):
37 broker_transport_options['visibility_timeout'] = 21600
38
39 DEFAULT_CELERY_CONFIG = {
40 'accept_content': ['json'],
41 'event_serializer': 'json',
42 'worker_prefetch_multiplier': conf.getint('celery', 'worker_prefetch_multiplier'),
43 'task_acks_late': True,
44 'task_default_queue': conf.get('operators', 'DEFAULT_QUEUE'),
45 'task_default_exchange': conf.get('operators', 'DEFAULT_QUEUE'),
46 'task_track_started': conf.getboolean('celery', 'task_track_started'),
47 'broker_url': broker_url,
48 'broker_transport_options': broker_transport_options,
49 'result_backend': conf.get('celery', 'RESULT_BACKEND'),
50 'worker_concurrency': conf.getint('celery', 'WORKER_CONCURRENCY'),
51 'worker_enable_remote_control': conf.getboolean('celery', 'worker_enable_remote_control'),
52 }
53
54 celery_ssl_active = False
55 try:
56 celery_ssl_active = conf.getboolean('celery', 'SSL_ACTIVE')
57 except AirflowConfigException:
58 log.warning("Celery Executor will run without SSL")
59
60 try:
61 if celery_ssl_active:
62 if broker_url and 'amqp://' in broker_url:
63 broker_use_ssl = {
64 'keyfile': conf.get('celery', 'SSL_KEY'),
65 'certfile': conf.get('celery', 'SSL_CERT'),
66 'ca_certs': conf.get('celery', 'SSL_CACERT'),
67 'cert_reqs': ssl.CERT_REQUIRED,
68 }
69 elif broker_url and 'redis://' in broker_url:
70 broker_use_ssl = {
71 'ssl_keyfile': conf.get('celery', 'SSL_KEY'),
72 'ssl_certfile': conf.get('celery', 'SSL_CERT'),
73 'ssl_ca_certs': conf.get('celery', 'SSL_CACERT'),
74 'ssl_cert_reqs': ssl.CERT_REQUIRED,
75 }
76 else:
77 raise AirflowException(
78 'The broker you configured does not support SSL_ACTIVE to be True. '
79 'Please use RabbitMQ or Redis if you would like to use SSL for broker.'
80 )
81
82 DEFAULT_CELERY_CONFIG['broker_use_ssl'] = broker_use_ssl
83 except AirflowConfigException:
84 raise AirflowException(
85 'AirflowConfigException: SSL_ACTIVE is True, '
86 'please ensure SSL_KEY, '
87 'SSL_CERT and SSL_CACERT are set'
88 )
89 except Exception as e:
90 raise AirflowException(
91 f'Exception: There was an unknown Celery SSL Error. Please ensure you want to use SSL and/or have '
92 f'all necessary certs and key ({e}).'
93 )
94
95 result_backend = str(DEFAULT_CELERY_CONFIG['result_backend'])
96 if 'amqp://' in result_backend or 'redis://' in result_backend or 'rpc://' in result_backend:
97 log.warning(
98 "You have configured a result_backend of %s, it is highly recommended "
99 "to use an alternative result_backend (i.e. a database).",
100 result_backend,
101 )
102
[end of airflow/config_templates/default_celery.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/airflow/config_templates/default_celery.py b/airflow/config_templates/default_celery.py
--- a/airflow/config_templates/default_celery.py
+++ b/airflow/config_templates/default_celery.py
@@ -36,6 +36,12 @@
if _broker_supports_visibility_timeout(broker_url):
broker_transport_options['visibility_timeout'] = 21600
+if conf.has_option("celery", 'RESULT_BACKEND'):
+ result_backend = conf.get_mandatory_value('celery', 'RESULT_BACKEND')
+else:
+ log.debug("Value for celery result_backend not found. Using sql_alchemy_conn with db+ prefix.")
+ result_backend = f'db+{conf.get("database", "SQL_ALCHEMY_CONN")}'
+
DEFAULT_CELERY_CONFIG = {
'accept_content': ['json'],
'event_serializer': 'json',
@@ -46,7 +52,7 @@
'task_track_started': conf.getboolean('celery', 'task_track_started'),
'broker_url': broker_url,
'broker_transport_options': broker_transport_options,
- 'result_backend': conf.get('celery', 'RESULT_BACKEND'),
+ 'result_backend': result_backend,
'worker_concurrency': conf.getint('celery', 'WORKER_CONCURRENCY'),
'worker_enable_remote_control': conf.getboolean('celery', 'worker_enable_remote_control'),
}
@@ -92,7 +98,6 @@
f'all necessary certs and key ({e}).'
)
-result_backend = str(DEFAULT_CELERY_CONFIG['result_backend'])
if 'amqp://' in result_backend or 'redis://' in result_backend or 'rpc://' in result_backend:
log.warning(
"You have configured a result_backend of %s, it is highly recommended "
|
{"golden_diff": "diff --git a/airflow/config_templates/default_celery.py b/airflow/config_templates/default_celery.py\n--- a/airflow/config_templates/default_celery.py\n+++ b/airflow/config_templates/default_celery.py\n@@ -36,6 +36,12 @@\n if _broker_supports_visibility_timeout(broker_url):\n broker_transport_options['visibility_timeout'] = 21600\n \n+if conf.has_option(\"celery\", 'RESULT_BACKEND'):\n+ result_backend = conf.get_mandatory_value('celery', 'RESULT_BACKEND')\n+else:\n+ log.debug(\"Value for celery result_backend not found. Using sql_alchemy_conn with db+ prefix.\")\n+ result_backend = f'db+{conf.get(\"database\", \"SQL_ALCHEMY_CONN\")}'\n+\n DEFAULT_CELERY_CONFIG = {\n 'accept_content': ['json'],\n 'event_serializer': 'json',\n@@ -46,7 +52,7 @@\n 'task_track_started': conf.getboolean('celery', 'task_track_started'),\n 'broker_url': broker_url,\n 'broker_transport_options': broker_transport_options,\n- 'result_backend': conf.get('celery', 'RESULT_BACKEND'),\n+ 'result_backend': result_backend,\n 'worker_concurrency': conf.getint('celery', 'WORKER_CONCURRENCY'),\n 'worker_enable_remote_control': conf.getboolean('celery', 'worker_enable_remote_control'),\n }\n@@ -92,7 +98,6 @@\n f'all necessary certs and key ({e}).'\n )\n \n-result_backend = str(DEFAULT_CELERY_CONFIG['result_backend'])\n if 'amqp://' in result_backend or 'redis://' in result_backend or 'rpc://' in result_backend:\n log.warning(\n \"You have configured a result_backend of %s, it is highly recommended \"\n", "issue": "db+ string in result backend but not metadata secret\n### Official Helm Chart version\n\n1.1.0 (latest released)\n\n### Apache Airflow version\n\n2.1.3 (latest released)\n\n### Kubernetes Version\n\n1.21\n\n### Helm Chart configuration\n\n data:\r\n metadataSecretName: \"airflow-metadata\"\r\n resultBackendSecretName: \"airflow-result-backend\"\n\n### Docker Image customisations\n\n_No response_\n\n### What happened\n\nIf we only supply 1 secret with \r\n```\r\nconnection: postgresql://airflow:[email protected]:5432/airflow?sslmode=disable\r\n```\r\nTo use for both metadata and resultBackendConnection then we end up with a connection error because\r\nresultBackendConnection expects the string to be formatted like \r\n```\r\nconnection: db+postgresql://airflow:[email protected]:5432/airflow?sslmode=disable\r\n```\r\nfrom what i can tell\n\n### What you expected to happen\n\nI'd expect to be able to use the same secret for both using the same format if they are using the same connection. \n\n### How to reproduce\n\nMake a secret structured like above to look like the metadataConnection auto-generated secret.\r\nuse that same secret for the result backend.\r\ndeploy.\r\n\n\n### Anything else\n\nOccurs always. \r\nTo get around currently we make 2 secrets one with just the db+ prepended. \n\n### Are you willing to submit PR?\n\n- [ ] Yes I am willing to submit a PR!\n\n### Code of Conduct\n\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\n\n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Default celery configuration.\"\"\"\nimport logging\nimport ssl\n\nfrom airflow.configuration import conf\nfrom airflow.exceptions import AirflowConfigException, AirflowException\n\n\ndef _broker_supports_visibility_timeout(url):\n return url.startswith(\"redis://\") or url.startswith(\"sqs://\")\n\n\nlog = logging.getLogger(__name__)\n\nbroker_url = conf.get('celery', 'BROKER_URL')\n\nbroker_transport_options = conf.getsection('celery_broker_transport_options') or {}\nif 'visibility_timeout' not in broker_transport_options:\n if _broker_supports_visibility_timeout(broker_url):\n broker_transport_options['visibility_timeout'] = 21600\n\nDEFAULT_CELERY_CONFIG = {\n 'accept_content': ['json'],\n 'event_serializer': 'json',\n 'worker_prefetch_multiplier': conf.getint('celery', 'worker_prefetch_multiplier'),\n 'task_acks_late': True,\n 'task_default_queue': conf.get('operators', 'DEFAULT_QUEUE'),\n 'task_default_exchange': conf.get('operators', 'DEFAULT_QUEUE'),\n 'task_track_started': conf.getboolean('celery', 'task_track_started'),\n 'broker_url': broker_url,\n 'broker_transport_options': broker_transport_options,\n 'result_backend': conf.get('celery', 'RESULT_BACKEND'),\n 'worker_concurrency': conf.getint('celery', 'WORKER_CONCURRENCY'),\n 'worker_enable_remote_control': conf.getboolean('celery', 'worker_enable_remote_control'),\n}\n\ncelery_ssl_active = False\ntry:\n celery_ssl_active = conf.getboolean('celery', 'SSL_ACTIVE')\nexcept AirflowConfigException:\n log.warning(\"Celery Executor will run without SSL\")\n\ntry:\n if celery_ssl_active:\n if broker_url and 'amqp://' in broker_url:\n broker_use_ssl = {\n 'keyfile': conf.get('celery', 'SSL_KEY'),\n 'certfile': conf.get('celery', 'SSL_CERT'),\n 'ca_certs': conf.get('celery', 'SSL_CACERT'),\n 'cert_reqs': ssl.CERT_REQUIRED,\n }\n elif broker_url and 'redis://' in broker_url:\n broker_use_ssl = {\n 'ssl_keyfile': conf.get('celery', 'SSL_KEY'),\n 'ssl_certfile': conf.get('celery', 'SSL_CERT'),\n 'ssl_ca_certs': conf.get('celery', 'SSL_CACERT'),\n 'ssl_cert_reqs': ssl.CERT_REQUIRED,\n }\n else:\n raise AirflowException(\n 'The broker you configured does not support SSL_ACTIVE to be True. '\n 'Please use RabbitMQ or Redis if you would like to use SSL for broker.'\n )\n\n DEFAULT_CELERY_CONFIG['broker_use_ssl'] = broker_use_ssl\nexcept AirflowConfigException:\n raise AirflowException(\n 'AirflowConfigException: SSL_ACTIVE is True, '\n 'please ensure SSL_KEY, '\n 'SSL_CERT and SSL_CACERT are set'\n )\nexcept Exception as e:\n raise AirflowException(\n f'Exception: There was an unknown Celery SSL Error. Please ensure you want to use SSL and/or have '\n f'all necessary certs and key ({e}).'\n )\n\nresult_backend = str(DEFAULT_CELERY_CONFIG['result_backend'])\nif 'amqp://' in result_backend or 'redis://' in result_backend or 'rpc://' in result_backend:\n log.warning(\n \"You have configured a result_backend of %s, it is highly recommended \"\n \"to use an alternative result_backend (i.e. a database).\",\n result_backend,\n )\n", "path": "airflow/config_templates/default_celery.py"}]}
| 2,036 | 393 |
gh_patches_debug_18040
|
rasdani/github-patches
|
git_diff
|
liqd__a4-meinberlin-1652
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
make it possible to detach plan from project
Now that connecting a project with a plan is no longer obligatory, initiators need to be able to undo a connection. Once I click on a plan, I can no longer choose NOT to connect to a plan, which should be possible.

</issue>
<code>
[start of meinberlin/apps/plans/forms.py]
1 from django import forms
2 from django.conf import settings
3 from django.db.models import Q
4 from django.utils.translation import ugettext_lazy as _
5
6 from adhocracy4.dashboard.components.forms import ProjectDashboardForm
7 from adhocracy4.maps import widgets as maps_widgets
8 from adhocracy4.projects import models as project_models
9 from meinberlin.apps.contrib import widgets as contrib_widgets
10
11 from . import models
12
13
14 def get_theme_options():
15 return models.Plan.objects\
16 .filter(~Q(theme=''))\
17 .order_by('theme')\
18 .values_list('theme', flat=True)\
19 .distinct()
20
21
22 class PlanForm(forms.ModelForm):
23
24 class Meta:
25 model = models.Plan
26 fields = [
27 'title',
28 'description_image',
29 'contact',
30 'point',
31 'point_label',
32 'district',
33 'cost',
34 'description',
35 'theme',
36 'status',
37 'participation']
38 widgets = {
39 'point': maps_widgets.MapChoosePointWidget(
40 polygon=settings.BERLIN_POLYGON),
41 'theme': contrib_widgets.TextWithDatalistWidget(attrs={
42 'options': get_theme_options
43 })
44 }
45 error_messages = {
46 'point': {
47 'required': _('Please locate the plan on the map.')
48 }
49 }
50
51 def __init__(self, *args, **kwargs):
52 super().__init__(*args, **kwargs)
53 self.fields['district'].empty_label = _('City wide')
54
55
56 class CustomMultipleChoiceField(forms.ModelMultipleChoiceField):
57
58 widget = forms.RadioSelect
59
60 def clean(self, value):
61 if value is None:
62 return super().clean([])
63 return super().clean([value])
64
65
66 class ProjectPlansDashboardForm(ProjectDashboardForm):
67 plans = CustomMultipleChoiceField(queryset=None,
68 label=_('Plans'))
69
70 class Meta:
71 model = project_models.Project
72 fields = ['plans']
73 required = False
74
75 def save(self, commit=False):
76 plans = self.cleaned_data['plans']
77 self.instance.plans.set(plans)
78
79 def __init__(self, *args, **kwargs):
80 super().__init__(*args, **kwargs)
81 self.initial['plans'] = self.instance.plans.all()
82 self.fields['plans'
83 ].queryset = self.instance.organisation.plan_set.all()
84
[end of meinberlin/apps/plans/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/meinberlin/apps/plans/forms.py b/meinberlin/apps/plans/forms.py
--- a/meinberlin/apps/plans/forms.py
+++ b/meinberlin/apps/plans/forms.py
@@ -55,10 +55,10 @@
class CustomMultipleChoiceField(forms.ModelMultipleChoiceField):
- widget = forms.RadioSelect
+ widget = forms.Select
def clean(self, value):
- if value is None:
+ if not value:
return super().clean([])
return super().clean([value])
@@ -79,5 +79,7 @@
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.initial['plans'] = self.instance.plans.all()
- self.fields['plans'
- ].queryset = self.instance.organisation.plan_set.all()
+ self.fields['plans'].required = False
+ self.fields['plans'].empty_label = '----------'
+ self.fields['plans'].queryset = \
+ self.instance.organisation.plan_set.all()
|
{"golden_diff": "diff --git a/meinberlin/apps/plans/forms.py b/meinberlin/apps/plans/forms.py\n--- a/meinberlin/apps/plans/forms.py\n+++ b/meinberlin/apps/plans/forms.py\n@@ -55,10 +55,10 @@\n \n class CustomMultipleChoiceField(forms.ModelMultipleChoiceField):\n \n- widget = forms.RadioSelect\n+ widget = forms.Select\n \n def clean(self, value):\n- if value is None:\n+ if not value:\n return super().clean([])\n return super().clean([value])\n \n@@ -79,5 +79,7 @@\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.initial['plans'] = self.instance.plans.all()\n- self.fields['plans'\n- ].queryset = self.instance.organisation.plan_set.all()\n+ self.fields['plans'].required = False\n+ self.fields['plans'].empty_label = '----------'\n+ self.fields['plans'].queryset = \\\n+ self.instance.organisation.plan_set.all()\n", "issue": "make it possible to detach plan from project\nnow that a connection of a project with a plan is no longer obligatory, initiators need to be able to undo a connection. Once I click on a plan I can no longer NOT connect to a plan, which should be possible.\r\n\r\n\r\n\n", "before_files": [{"content": "from django import forms\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.components.forms import ProjectDashboardForm\nfrom adhocracy4.maps import widgets as maps_widgets\nfrom adhocracy4.projects import models as project_models\nfrom meinberlin.apps.contrib import widgets as contrib_widgets\n\nfrom . import models\n\n\ndef get_theme_options():\n return models.Plan.objects\\\n .filter(~Q(theme=''))\\\n .order_by('theme')\\\n .values_list('theme', flat=True)\\\n .distinct()\n\n\nclass PlanForm(forms.ModelForm):\n\n class Meta:\n model = models.Plan\n fields = [\n 'title',\n 'description_image',\n 'contact',\n 'point',\n 'point_label',\n 'district',\n 'cost',\n 'description',\n 'theme',\n 'status',\n 'participation']\n widgets = {\n 'point': maps_widgets.MapChoosePointWidget(\n polygon=settings.BERLIN_POLYGON),\n 'theme': contrib_widgets.TextWithDatalistWidget(attrs={\n 'options': get_theme_options\n })\n }\n error_messages = {\n 'point': {\n 'required': _('Please locate the plan on the map.')\n }\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['district'].empty_label = _('City wide')\n\n\nclass CustomMultipleChoiceField(forms.ModelMultipleChoiceField):\n\n widget = forms.RadioSelect\n\n def clean(self, value):\n if value is None:\n return super().clean([])\n return super().clean([value])\n\n\nclass ProjectPlansDashboardForm(ProjectDashboardForm):\n plans = CustomMultipleChoiceField(queryset=None,\n label=_('Plans'))\n\n class Meta:\n model = project_models.Project\n fields = ['plans']\n required = False\n\n def save(self, commit=False):\n plans = self.cleaned_data['plans']\n self.instance.plans.set(plans)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.initial['plans'] = self.instance.plans.all()\n self.fields['plans'\n ].queryset = self.instance.organisation.plan_set.all()\n", "path": "meinberlin/apps/plans/forms.py"}]}
| 1,316 | 240 |
gh_patches_debug_1022
|
rasdani/github-patches
|
git_diff
|
searxng__searxng-437
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: microsoft academic engine
**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**
Repository: https://github.com/tiekoetter/searxng
Branch: master
Version: 1.0.0-972-93548243
<!-- Check if these values are correct -->
**How did you install SearXNG?**
<!-- Did you install SearXNG using the official wiki or using searxng-docker
or manually by executing the searx/webapp.py file? -->
**What happened?**
<!-- A clear and concise description of what the bug is. -->
**How To Reproduce**
<!-- How can we reproduce this issue? (as minimally and as precisely as possible) -->
**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
**Screenshots & Logs**
<!-- If applicable, add screenshots, logs to help explain your problem. -->
**Additional context**
<!-- Add any other context about the problem here. -->
**Technical report**
Error
* Error: httpx.TimeoutException
* Percentage: 50
* Parameters: `(None, None, None)`
* File name: `searx/search/processors/online.py:97`
* Function: `_send_http_request`
* Code: `response = req(params['url'], **request_args)`
</issue>
<code>
[start of searx/engines/microsoft_academic.py]
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2 """
3 Microsoft Academic (Science)
4 """
5
6 from json import dumps, loads
7 from searx.utils import html_to_text
8
9 # about
10 about = {
11 "website": 'https://academic.microsoft.com',
12 "wikidata_id": 'Q28136779',
13 "official_api_documentation": 'http://ma-graph.org/',
14 "use_official_api": False,
15 "require_api_key": False,
16 "results": 'JSON',
17 }
18
19 categories = ['images']
20 paging = True
21 search_url = 'https://academic.microsoft.com/api/search'
22 _paper_url = 'https://academic.microsoft.com/paper/{id}/reference'
23
24
25 def request(query, params):
26 params['url'] = search_url
27 params['method'] = 'POST'
28 params['headers']['content-type'] = 'application/json; charset=utf-8'
29 params['data'] = dumps({
30 'query': query,
31 'queryExpression': '',
32 'filters': [],
33 'orderBy': 0,
34 'skip': (params['pageno'] - 1) * 10,
35 'sortAscending': True,
36 'take': 10,
37 'includeCitationContexts': False,
38 'profileId': '',
39 })
40
41 return params
42
43
44 def response(resp):
45 results = []
46 response_data = loads(resp.text)
47 if not response_data:
48 return results
49
50 for result in response_data['pr']:
51 if 'dn' not in result['paper']:
52 continue
53
54 title = result['paper']['dn']
55 content = _get_content(result['paper'])
56 url = _paper_url.format(id=result['paper']['id'])
57 results.append({
58 'url': url,
59 'title': html_to_text(title),
60 'content': html_to_text(content),
61 })
62
63 return results
64
65
66 def _get_content(result):
67 if 'd' in result:
68 content = result['d']
69 if len(content) > 300:
70 return content[:300] + '...'
71 return content
72
73 return ''
74
[end of searx/engines/microsoft_academic.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/searx/engines/microsoft_academic.py b/searx/engines/microsoft_academic.py
--- a/searx/engines/microsoft_academic.py
+++ b/searx/engines/microsoft_academic.py
@@ -47,7 +47,7 @@
if not response_data:
return results
- for result in response_data['pr']:
+ for result in response_data.get('pr', {}):
if 'dn' not in result['paper']:
continue
|
{"golden_diff": "diff --git a/searx/engines/microsoft_academic.py b/searx/engines/microsoft_academic.py\n--- a/searx/engines/microsoft_academic.py\n+++ b/searx/engines/microsoft_academic.py\n@@ -47,7 +47,7 @@\n if not response_data:\n return results\n \n- for result in response_data['pr']:\n+ for result in response_data.get('pr', {}):\n if 'dn' not in result['paper']:\n continue\n", "issue": "Bug: microsoft academic engine\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\nRepository: https://github.com/tiekoetter/searxng\r\nBranch: master\r\nVersion: 1.0.0-972-93548243\r\n<!-- Check if these values are correct -->\r\n\r\n**How did you install SearXNG?**\r\n<!-- Did you install SearXNG using the official wiki or using searxng-docker\r\nor manually by executing the searx/webapp.py file? -->\r\n**What happened?**\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n**How To Reproduce**\r\n<!-- How can we reproduce this issue? (as minimally and as precisely as possible) -->\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n**Screenshots & Logs**\r\n<!-- If applicable, add screenshots, logs to help explain your problem. -->\r\n\r\n**Additional context**\r\n<!-- Add any other context about the problem here. -->\r\n\r\n**Technical report**\r\n\r\nError\r\n * Error: httpx.TimeoutException\r\n * Percentage: 50\r\n * Parameters: `(None, None, None)`\r\n * File name: `searx/search/processors/online.py:97`\r\n * Function: `_send_http_request`\r\n * Code: `response = req(params['url'], **request_args)`\r\n\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\"\"\"\n Microsoft Academic (Science)\n\"\"\"\n\nfrom json import dumps, loads\nfrom searx.utils import html_to_text\n\n# about\nabout = {\n \"website\": 'https://academic.microsoft.com',\n \"wikidata_id\": 'Q28136779',\n \"official_api_documentation\": 'http://ma-graph.org/',\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": 'JSON',\n}\n\ncategories = ['images']\npaging = True\nsearch_url = 'https://academic.microsoft.com/api/search'\n_paper_url = 'https://academic.microsoft.com/paper/{id}/reference'\n\n\ndef request(query, params):\n params['url'] = search_url\n params['method'] = 'POST'\n params['headers']['content-type'] = 'application/json; charset=utf-8'\n params['data'] = dumps({\n 'query': query,\n 'queryExpression': '',\n 'filters': [],\n 'orderBy': 0,\n 'skip': (params['pageno'] - 1) * 10,\n 'sortAscending': True,\n 'take': 10,\n 'includeCitationContexts': False,\n 'profileId': '',\n })\n\n return params\n\n\ndef response(resp):\n results = []\n response_data = loads(resp.text)\n if not response_data:\n return results\n\n for result in response_data['pr']:\n if 'dn' not in result['paper']:\n continue\n\n title = result['paper']['dn']\n content = _get_content(result['paper'])\n url = _paper_url.format(id=result['paper']['id'])\n results.append({\n 'url': url,\n 'title': html_to_text(title),\n 'content': html_to_text(content),\n })\n\n return results\n\n\ndef _get_content(result):\n if 'd' in result:\n content = result['d']\n if len(content) > 300:\n return content[:300] + '...'\n return content\n\n return ''\n", "path": "searx/engines/microsoft_academic.py"}]}
| 1,457 | 115 |
gh_patches_debug_30987
|
rasdani/github-patches
|
git_diff
|
kubeflow__pipelines-5293
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'kubeflow-pipelines-profile-controller' fails to deploy pods on profile creation when ResourceQuota is set in the profile.
In multi-user mode, it seems that `kubeflow-pipelines-profile-controller` deploys the pods below on Kubeflow profile creation.
- `ml-pipeline-ui-artifact`
- `ml-pipeline-visualizationserver`
When a `ResourceQuota` is set in the profile, `kubeflow-pipelines-profile-controller` fails to deploy `ml-pipeline-ui-artifact` and `ml-pipeline-visualizationserver` with the error below.
```
Warning FailedCreate 17m replicaset-controller
Error creating: pods "ml-pipeline-ui-artifact-684c5db68-s74w8" is forbidden: failed quota: kf-resource-quota: must specify cpu,memory
```
- Related code: [https://github.com/kubeflow/manifests/blob/master/apps/pipeline/upstream/installs/multi-user/pipelines-profile-controller/sync.py](https://github.com/kubeflow/manifests/blob/master/apps/pipeline/upstream/installs/multi-user/pipelines-profile-controller/sync.py)
- Cause: Container resource limits & requests are not set on the pod specs, so the pods cannot be deployed in the namespace (which has a `ResourceQuota`).
Since Kubeflow profiles support setting a `ResourceQuota`, `kubeflow-pipelines-profile-controller` should set container resource requests & limits in the pod specs to avoid the errors above.
I confirmed that with the patch below, the ml-pipeline pods are deployed successfully.
```python
diff --git a/apps/pipeline/upstream/installs/multi-user/pipelines-profile-controller/sync.py b/apps/pipeline/upstream/installs/multi-user/pipelines-profile-controller/sync.py
index 75c6e5db..a0e71fbf 100644
--- a/apps/pipeline/upstream/installs/multi-user/pipelines-profile-controller/sync.py
+++ b/apps/pipeline/upstream/installs/multi-user/pipelines-profile-controller/sync.py
@@ -104,6 +104,16 @@ class Controller(BaseHTTPRequestHandler):
"ports": [{
"containerPort": 8888
}],
+ "resources": {
+ "requests": {
+ "cpu": "50m",
+ "memory": "200Mi"
+ },
+ "limits": {
+ "cpu": "500m",
+ "memory": "2Gi"
+ },
+ }
}],
"serviceAccountName":
"default-editor",
@@ -204,7 +214,17 @@ class Controller(BaseHTTPRequestHandler):
"IfNotPresent",
"ports": [{
"containerPort": 3000
- }]
+ }],
+ "resources": {
+ "requests": {
+ "cpu": "50m",
+ "memory": "200Mi"
+ },
+ "limits": {
+ "cpu": "500m",
+ "memory": "2Gi"
+ },
+ }
}],
"serviceAccountName":
"default-editor"
```
Please take a look, thanks.
</issue>
<code>
[start of manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py]
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from http.server import BaseHTTPRequestHandler, HTTPServer
16 import json
17 import os
18 import base64
19
20 kfp_version = os.environ["KFP_VERSION"]
21 disable_istio_sidecar = os.environ.get("DISABLE_ISTIO_SIDECAR") == "true"
22 mlpipeline_minio_access_key = base64.b64encode(
23 bytes(os.environ.get("MINIO_ACCESS_KEY"), 'utf-8')).decode('utf-8')
24 mlpipeline_minio_secret_key = base64.b64encode(
25 bytes(os.environ.get("MINIO_SECRET_KEY"), 'utf-8')).decode('utf-8')
26
27
28 class Controller(BaseHTTPRequestHandler):
29 def sync(self, parent, children):
30 # HACK: Currently using serving.kubeflow.org/inferenceservice to identify
31 # kubeflow user namespaces.
32 # TODO: let Kubeflow profile controller add a pipeline specific label to
33 # user namespaces and use that label instead.
34 pipeline_enabled = parent.get("metadata", {}).get(
35 "labels", {}).get("serving.kubeflow.org/inferenceservice")
36
37 if not pipeline_enabled:
38 return {"status": {}, "children": []}
39
40 # Compute status based on observed state.
41 desired_status = {
42 "kubeflow-pipelines-ready": \
43 len(children["Secret.v1"]) == 1 and \
44 len(children["ConfigMap.v1"]) == 1 and \
45 len(children["Deployment.apps/v1"]) == 2 and \
46 len(children["Service.v1"]) == 2 and \
47 len(children["DestinationRule.networking.istio.io/v1alpha3"]) == 1 and \
48 len(children["AuthorizationPolicy.security.istio.io/v1beta1"]) == 1 and \
49 "True" or "False"
50 }
51
52 # Generate the desired child object(s).
53 # parent is a namespace
54 namespace = parent.get("metadata", {}).get("name")
55 desired_resources = [
56 {
57 "apiVersion": "v1",
58 "kind": "ConfigMap",
59 "metadata": {
60 "name": "metadata-grpc-configmap",
61 "namespace": namespace,
62 },
63 "data": {
64 "METADATA_GRPC_SERVICE_HOST":
65 "metadata-grpc-service.kubeflow",
66 "METADATA_GRPC_SERVICE_PORT": "8080",
67 },
68 },
69 # Visualization server related manifests below
70 {
71 "apiVersion": "apps/v1",
72 "kind": "Deployment",
73 "metadata": {
74 "labels": {
75 "app": "ml-pipeline-visualizationserver"
76 },
77 "name": "ml-pipeline-visualizationserver",
78 "namespace": namespace,
79 },
80 "spec": {
81 "selector": {
82 "matchLabels": {
83 "app": "ml-pipeline-visualizationserver"
84 },
85 },
86 "template": {
87 "metadata": {
88 "labels": {
89 "app": "ml-pipeline-visualizationserver"
90 },
91 "annotations": disable_istio_sidecar and {
92 "sidecar.istio.io/inject": "false"
93 } or {},
94 },
95 "spec": {
96 "containers": [{
97 "image":
98 "gcr.io/ml-pipeline/visualization-server:" +
99 kfp_version,
100 "imagePullPolicy":
101 "IfNotPresent",
102 "name":
103 "ml-pipeline-visualizationserver",
104 "ports": [{
105 "containerPort": 8888
106 }],
107 }],
108 "serviceAccountName":
109 "default-editor",
110 },
111 },
112 },
113 },
114 {
115 "apiVersion": "networking.istio.io/v1alpha3",
116 "kind": "DestinationRule",
117 "metadata": {
118 "name": "ml-pipeline-visualizationserver",
119 "namespace": namespace,
120 },
121 "spec": {
122 "host": "ml-pipeline-visualizationserver",
123 "trafficPolicy": {
124 "tls": {
125 "mode": "ISTIO_MUTUAL"
126 }
127 }
128 }
129 },
130 {
131 "apiVersion": "security.istio.io/v1beta1",
132 "kind": "AuthorizationPolicy",
133 "metadata": {
134 "name": "ml-pipeline-visualizationserver",
135 "namespace": namespace,
136 },
137 "spec": {
138 "selector": {
139 "matchLabels": {
140 "app": "ml-pipeline-visualizationserver"
141 }
142 },
143 "rules": [{
144 "from": [{
145 "source": {
146 "principals": ["cluster.local/ns/kubeflow/sa/ml-pipeline"]
147 }
148 }]
149 }]
150 }
151 },
152 {
153 "apiVersion": "v1",
154 "kind": "Service",
155 "metadata": {
156 "name": "ml-pipeline-visualizationserver",
157 "namespace": namespace,
158 },
159 "spec": {
160 "ports": [{
161 "name": "http",
162 "port": 8888,
163 "protocol": "TCP",
164 "targetPort": 8888,
165 }],
166 "selector": {
167 "app": "ml-pipeline-visualizationserver",
168 },
169 },
170 },
171 # Artifact fetcher related resources below.
172 {
173 "apiVersion": "apps/v1",
174 "kind": "Deployment",
175 "metadata": {
176 "labels": {
177 "app": "ml-pipeline-ui-artifact"
178 },
179 "name": "ml-pipeline-ui-artifact",
180 "namespace": namespace,
181 },
182 "spec": {
183 "selector": {
184 "matchLabels": {
185 "app": "ml-pipeline-ui-artifact"
186 }
187 },
188 "template": {
189 "metadata": {
190 "labels": {
191 "app": "ml-pipeline-ui-artifact"
192 },
193 "annotations": disable_istio_sidecar and {
194 "sidecar.istio.io/inject": "false"
195 } or {},
196 },
197 "spec": {
198 "containers": [{
199 "name":
200 "ml-pipeline-ui-artifact",
201 "image":
202 "gcr.io/ml-pipeline/frontend:" + kfp_version,
203 "imagePullPolicy":
204 "IfNotPresent",
205 "ports": [{
206 "containerPort": 3000
207 }]
208 }],
209 "serviceAccountName":
210 "default-editor"
211 }
212 }
213 }
214 },
215 {
216 "apiVersion": "v1",
217 "kind": "Service",
218 "metadata": {
219 "name": "ml-pipeline-ui-artifact",
220 "namespace": namespace,
221 "labels": {
222 "app": "ml-pipeline-ui-artifact"
223 }
224 },
225 "spec": {
226 "ports": [{
227 "name":
228 "http", # name is required to let istio understand request protocol
229 "port": 80,
230 "protocol": "TCP",
231 "targetPort": 3000
232 }],
233 "selector": {
234 "app": "ml-pipeline-ui-artifact"
235 }
236 }
237 },
238 ]
239 print('Received request:', parent)
240 print('Desired resources except secrets:', desired_resources)
241 # Moved after the print argument because this is sensitive data.
242 desired_resources.append({
243 "apiVersion": "v1",
244 "kind": "Secret",
245 "metadata": {
246 "name": "mlpipeline-minio-artifact",
247 "namespace": namespace,
248 },
249 "data": {
250 "accesskey": mlpipeline_minio_access_key,
251 "secretkey": mlpipeline_minio_secret_key,
252 },
253 })
254
255 return {"status": desired_status, "children": desired_resources}
256
257 def do_POST(self):
258 # Serve the sync() function as a JSON webhook.
259 observed = json.loads(
260 self.rfile.read(int(self.headers.get("content-length"))))
261 desired = self.sync(observed["parent"], observed["children"])
262
263 self.send_response(200)
264 self.send_header("Content-type", "application/json")
265 self.end_headers()
266 self.wfile.write(bytes(json.dumps(desired), 'utf-8'))
267
268
269 HTTPServer(("", 80), Controller).serve_forever()
270
[end of manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py b/manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py
--- a/manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py
+++ b/manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py
@@ -104,6 +104,16 @@
"ports": [{
"containerPort": 8888
}],
+ "resources": {
+ "requests": {
+ "cpu": "50m",
+ "memory": "200Mi"
+ },
+ "limits": {
+ "cpu": "500m",
+ "memory": "1Gi"
+ },
+ }
}],
"serviceAccountName":
"default-editor",
@@ -204,7 +214,17 @@
"IfNotPresent",
"ports": [{
"containerPort": 3000
- }]
+ }],
+ "resources": {
+ "requests": {
+ "cpu": "10m",
+ "memory": "70Mi"
+ },
+ "limits": {
+ "cpu": "100m",
+ "memory": "500Mi"
+ },
+ }
}],
"serviceAccountName":
"default-editor"
|
{"golden_diff": "diff --git a/manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py b/manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py\n--- a/manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py\n+++ b/manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py\n@@ -104,6 +104,16 @@\n \"ports\": [{\n \"containerPort\": 8888\n }],\n+ \"resources\": {\n+ \"requests\": {\n+ \"cpu\": \"50m\",\n+ \"memory\": \"200Mi\"\n+ },\n+ \"limits\": {\n+ \"cpu\": \"500m\",\n+ \"memory\": \"1Gi\"\n+ },\n+ }\n }],\n \"serviceAccountName\":\n \"default-editor\",\n@@ -204,7 +214,17 @@\n \"IfNotPresent\",\n \"ports\": [{\n \"containerPort\": 3000\n- }]\n+ }],\n+ \"resources\": {\n+ \"requests\": {\n+ \"cpu\": \"10m\",\n+ \"memory\": \"70Mi\"\n+ },\n+ \"limits\": {\n+ \"cpu\": \"100m\",\n+ \"memory\": \"500Mi\"\n+ },\n+ }\n }],\n \"serviceAccountName\":\n \"default-editor\"\n", "issue": "'kubeflow-pipelines-profile-controller' fails to deploy pods on profile creation when ResourceQuota is set in the profile.\nIn multi-user mode, it seems like `kubeflow-pipelines-profile-controller` deploy below pods on Kubeflow profile creation.\r\n\r\n- `ml-pipeline-ui-artifact`\r\n- `ml-pipeline-visualizationserver`\r\n\r\nWhen `ResourceQuota` is set in the profile, `kubeflow-pipelines-profile-controller` fails to deploy `ml-pipeline-ui-artifact` and `ml-pipeline-visualizationserver` with below error.\r\n\r\n```\r\nWarning FailedCreate 17m replicaset-controller \r\nError creating: pods \"ml-pipeline-ui-artifact-684c5db68-s74w8\" is forbidden: failed quota: kf-resource-quota: must specify cpu,memory\r\n```\r\n\r\n- Related code: [https://github.com/kubeflow/manifests/blob/master/apps/pipeline/upstream/installs/multi-user/pipelines-profile-controller/sync.py](https://github.com/kubeflow/manifests/blob/master/apps/pipeline/upstream/installs/multi-user/pipelines-profile-controller/sync.py)\r\n- Cause: The container resource limit & request is not set on the pod specs, so the pods can not be deployed in the namespace (which has `ResourceQuota` ).\r\n\r\nSince Kubeflow profile supports setting `ResourceQuota`, `kubeflow-pipelines-profile-controller` should set container resource requests & limits in pod specs to avoid above errors. 
\r\n\r\nI confirmed that with below patch, ml-pipeline pods are successfully deployed.\r\n\r\n```python\r\ndiff --git a/apps/pipeline/upstream/installs/multi-user/pipelines-profile-controller/sync.py b/apps/pipeline/upstream/installs/multi-user/pipelines-profile-controller/sync.py\r\nindex 75c6e5db..a0e71fbf 100644\r\n--- a/apps/pipeline/upstream/installs/multi-user/pipelines-profile-controller/sync.py\r\n+++ b/apps/pipeline/upstream/installs/multi-user/pipelines-profile-controller/sync.py\r\n@@ -104,6 +104,16 @@ class Controller(BaseHTTPRequestHandler):\r\n \"ports\": [{\r\n \"containerPort\": 8888\r\n }],\r\n+ \"resources\": {\r\n+ \"requests\": {\r\n+ \"cpu\": \"50m\",\r\n+ \"memory\": \"200Mi\"\r\n+ },\r\n+ \"limits\": {\r\n+ \"cpu\": \"500m\",\r\n+ \"memory\": \"2Gi\"\r\n+ },\r\n+ }\r\n }],\r\n \"serviceAccountName\":\r\n \"default-editor\",\r\n@@ -204,7 +214,17 @@ class Controller(BaseHTTPRequestHandler):\r\n \"IfNotPresent\",\r\n \"ports\": [{\r\n \"containerPort\": 3000\r\n- }]\r\n+ }],\r\n+ \"resources\": {\r\n+ \"requests\": {\r\n+ \"cpu\": \"50m\",\r\n+ \"memory\": \"200Mi\"\r\n+ },\r\n+ \"limits\": {\r\n+ \"cpu\": \"500m\",\r\n+ \"memory\": \"2Gi\"\r\n+ },\r\n+ }\r\n }],\r\n \"serviceAccountName\":\r\n \"default-editor\"\r\n```\r\n\r\nPlease take a look, thanks.\n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport json\nimport os\nimport base64\n\nkfp_version = os.environ[\"KFP_VERSION\"]\ndisable_istio_sidecar = os.environ.get(\"DISABLE_ISTIO_SIDECAR\") == \"true\"\nmlpipeline_minio_access_key = base64.b64encode(\n bytes(os.environ.get(\"MINIO_ACCESS_KEY\"), 'utf-8')).decode('utf-8')\nmlpipeline_minio_secret_key = base64.b64encode(\n bytes(os.environ.get(\"MINIO_SECRET_KEY\"), 'utf-8')).decode('utf-8')\n\n\nclass Controller(BaseHTTPRequestHandler):\n def sync(self, parent, children):\n # HACK: Currently using serving.kubeflow.org/inferenceservice to identify\n # kubeflow user namespaces.\n # TODO: let Kubeflow profile controller add a pipeline specific label to\n # user namespaces and use that label instead.\n pipeline_enabled = parent.get(\"metadata\", {}).get(\n \"labels\", {}).get(\"serving.kubeflow.org/inferenceservice\")\n\n if not pipeline_enabled:\n return {\"status\": {}, \"children\": []}\n\n # Compute status based on observed state.\n desired_status = {\n \"kubeflow-pipelines-ready\": \\\n len(children[\"Secret.v1\"]) == 1 and \\\n len(children[\"ConfigMap.v1\"]) == 1 and \\\n len(children[\"Deployment.apps/v1\"]) == 2 and \\\n len(children[\"Service.v1\"]) == 2 and \\\n len(children[\"DestinationRule.networking.istio.io/v1alpha3\"]) == 1 and \\\n len(children[\"AuthorizationPolicy.security.istio.io/v1beta1\"]) == 1 and \\\n \"True\" or \"False\"\n }\n\n # Generate the desired child object(s).\n # parent is a namespace\n namespace = parent.get(\"metadata\", {}).get(\"name\")\n desired_resources = [\n {\n \"apiVersion\": \"v1\",\n \"kind\": 
\"ConfigMap\",\n \"metadata\": {\n \"name\": \"metadata-grpc-configmap\",\n \"namespace\": namespace,\n },\n \"data\": {\n \"METADATA_GRPC_SERVICE_HOST\":\n \"metadata-grpc-service.kubeflow\",\n \"METADATA_GRPC_SERVICE_PORT\": \"8080\",\n },\n },\n # Visualization server related manifests below\n {\n \"apiVersion\": \"apps/v1\",\n \"kind\": \"Deployment\",\n \"metadata\": {\n \"labels\": {\n \"app\": \"ml-pipeline-visualizationserver\"\n },\n \"name\": \"ml-pipeline-visualizationserver\",\n \"namespace\": namespace,\n },\n \"spec\": {\n \"selector\": {\n \"matchLabels\": {\n \"app\": \"ml-pipeline-visualizationserver\"\n },\n },\n \"template\": {\n \"metadata\": {\n \"labels\": {\n \"app\": \"ml-pipeline-visualizationserver\"\n },\n \"annotations\": disable_istio_sidecar and {\n \"sidecar.istio.io/inject\": \"false\"\n } or {},\n },\n \"spec\": {\n \"containers\": [{\n \"image\":\n \"gcr.io/ml-pipeline/visualization-server:\" +\n kfp_version,\n \"imagePullPolicy\":\n \"IfNotPresent\",\n \"name\":\n \"ml-pipeline-visualizationserver\",\n \"ports\": [{\n \"containerPort\": 8888\n }],\n }],\n \"serviceAccountName\":\n \"default-editor\",\n },\n },\n },\n },\n {\n \"apiVersion\": \"networking.istio.io/v1alpha3\",\n \"kind\": \"DestinationRule\",\n \"metadata\": {\n \"name\": \"ml-pipeline-visualizationserver\",\n \"namespace\": namespace,\n },\n \"spec\": {\n \"host\": \"ml-pipeline-visualizationserver\",\n \"trafficPolicy\": {\n \"tls\": {\n \"mode\": \"ISTIO_MUTUAL\"\n }\n }\n }\n },\n {\n \"apiVersion\": \"security.istio.io/v1beta1\",\n \"kind\": \"AuthorizationPolicy\",\n \"metadata\": {\n \"name\": \"ml-pipeline-visualizationserver\",\n \"namespace\": namespace,\n },\n \"spec\": {\n \"selector\": {\n \"matchLabels\": {\n \"app\": \"ml-pipeline-visualizationserver\"\n }\n },\n \"rules\": [{\n \"from\": [{\n \"source\": {\n \"principals\": [\"cluster.local/ns/kubeflow/sa/ml-pipeline\"]\n }\n }]\n }]\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Service\",\n \"metadata\": {\n \"name\": \"ml-pipeline-visualizationserver\",\n \"namespace\": namespace,\n },\n \"spec\": {\n \"ports\": [{\n \"name\": \"http\",\n \"port\": 8888,\n \"protocol\": \"TCP\",\n \"targetPort\": 8888,\n }],\n \"selector\": {\n \"app\": \"ml-pipeline-visualizationserver\",\n },\n },\n },\n # Artifact fetcher related resources below.\n {\n \"apiVersion\": \"apps/v1\",\n \"kind\": \"Deployment\",\n \"metadata\": {\n \"labels\": {\n \"app\": \"ml-pipeline-ui-artifact\"\n },\n \"name\": \"ml-pipeline-ui-artifact\",\n \"namespace\": namespace,\n },\n \"spec\": {\n \"selector\": {\n \"matchLabels\": {\n \"app\": \"ml-pipeline-ui-artifact\"\n }\n },\n \"template\": {\n \"metadata\": {\n \"labels\": {\n \"app\": \"ml-pipeline-ui-artifact\"\n },\n \"annotations\": disable_istio_sidecar and {\n \"sidecar.istio.io/inject\": \"false\"\n } or {},\n },\n \"spec\": {\n \"containers\": [{\n \"name\":\n \"ml-pipeline-ui-artifact\",\n \"image\":\n \"gcr.io/ml-pipeline/frontend:\" + kfp_version,\n \"imagePullPolicy\":\n \"IfNotPresent\",\n \"ports\": [{\n \"containerPort\": 3000\n }]\n }],\n \"serviceAccountName\":\n \"default-editor\"\n }\n }\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Service\",\n \"metadata\": {\n \"name\": \"ml-pipeline-ui-artifact\",\n \"namespace\": namespace,\n \"labels\": {\n \"app\": \"ml-pipeline-ui-artifact\"\n }\n },\n \"spec\": {\n \"ports\": [{\n \"name\":\n \"http\", # name is required to let istio understand request protocol\n \"port\": 80,\n \"protocol\": \"TCP\",\n \"targetPort\": 3000\n 
}],\n \"selector\": {\n \"app\": \"ml-pipeline-ui-artifact\"\n }\n }\n },\n ]\n print('Received request:', parent)\n print('Desired resources except secrets:', desired_resources)\n # Moved after the print argument because this is sensitive data.\n desired_resources.append({\n \"apiVersion\": \"v1\",\n \"kind\": \"Secret\",\n \"metadata\": {\n \"name\": \"mlpipeline-minio-artifact\",\n \"namespace\": namespace,\n },\n \"data\": {\n \"accesskey\": mlpipeline_minio_access_key,\n \"secretkey\": mlpipeline_minio_secret_key,\n },\n })\n\n return {\"status\": desired_status, \"children\": desired_resources}\n\n def do_POST(self):\n # Serve the sync() function as a JSON webhook.\n observed = json.loads(\n self.rfile.read(int(self.headers.get(\"content-length\"))))\n desired = self.sync(observed[\"parent\"], observed[\"children\"])\n\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n self.wfile.write(bytes(json.dumps(desired), 'utf-8'))\n\n\nHTTPServer((\"\", 80), Controller).serve_forever()\n", "path": "manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py"}]}
| 3,863 | 331 |
gh_patches_debug_552
|
rasdani/github-patches
|
git_diff
|
pex-tool__pex-880
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.1
On the docket:
+ [x] PEX 2.1.0 regression: pex file won't build inside a running docker image as user #850
+ [x] Fully pin vendored requirements. #853
+ [x] Fix `tox -epackage` to create pex supporting 3.8. #843
+ [x] Pex erroneously warns about needing to use vendored `pkg_resources` for distributions with empty `namespace_packages.txt` metadata files. #840
+ [x] Interpreter discovery and pyenv don't interact well #782
+ [x] ensure_python_interpreter() bootstrapping broken on pypy shard #477
+ [x] Resolve error checking does not account for environment markers. #851
+ [x] Ensure Pex PEX contraints match pex wheel / sdist. #863
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = '2.1.0'
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = '2.1.0'
+__version__ = '2.1.1'
|
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '2.1.0'\n+__version__ = '2.1.1'\n", "issue": "Release 2.1.1\nOn the docket:\r\n+ [x] PEX 2.1.0 regression: pex file won't build inside a running docker image as user #850\r\n+ [x] Fully pin vendored requirements. #853\r\n+ [x] Fix `tox -epackage` to create pex supporting 3.8. #843\r\n+ [x] Pex erroneously warns about needing to use vendored `pkg_resources` for distributions with empty `namespace_packages.txt` metadata files. #840\r\n+ [x] Interpreter discovery and pyenv don't interact well #782\r\n+ [x] ensure_python_interpreter() bootstrapping broken on pypy shard #477\r\n+ [x] Resolve error checking does not account for environment markers. #851\r\n+ [x] Ensure Pex PEX contraints match pex wheel / sdist. #863\r\n\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.1.0'\n", "path": "pex/version.py"}]}
| 790 | 94 |
gh_patches_debug_29034
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-4545
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[WARNI] Could not get the package version from pkg_resources
I execute an ADO pipeline that checks a Terraform directory with subdirectories containing 4 local modules.

Checkov version:
docker.io/bridgecrew/checkov:latest
Path of the Terraform main.tf and directory that Checkov checks: PoC\platform\main.tf
Execution:
LOG_LEVEL=DEBUG;docker run --volume $(pwd):/tf bridgecrew/checkov --directory /tf --framework terraform --output junitxml --soft-fail > $(pwd)/CheckovReport.xml
Result:
2023-02-22 12:23:07,252 [MainThread ] [WARNI] Could not get the package version from pkg_resources
I listed the files and I can see the CheckovReport.xml, but it doesn't have enough data to be published:
No Result Found to Publish '/home/vsts/work/1/s/PoC/platform/CheckovReport.xml'.
=====================List all the files of main folder============================
/home/vsts/work/1/s/PoC/platform
total 40
-rw-r--r-- 1 vsts docker 3728 Feb 22 12:22 main.tf
drwxr-xr-x 10 vsts docker 4096 Feb 22 12:22 azure-tfmodule
-rw-r--r-- 1 vsts docker 2563 Feb 22 12:22 README.md
-rw-r--r-- 1 vsts docker 962 Feb 22 12:22 .gitignore
drwxr-xr-x 3 vsts docker 4096 Feb 22 12:22 ..
-rw-r--r-- 1 vsts docker 1256 Feb 22 12:22 versions.tf
-rw-r--r-- 1 vsts docker 1477 Feb 22 12:22 variables.tf
-rw-r--r-- 1 vsts docker 1292 Feb 22 12:22 terraform.tfvars
drwxr-xr-x 3 vsts docker 4096 Feb 22 12:23 .
-rw-r--r-- 1 vsts docker 2191 Feb 22 12:23 CheckovReport.xml
This is an example of one of the modules that produces the warning, from the main.tf, where workingDirectory is "./PoC/platform":
module "azurerm_log_analytics_workspace" {
source = "./azure-tfmodule/azure-loganalytics-workspace"
law_name = module.naming_standard.standard.log-analytics-workspace
location = var.location
resource_group_name = var.resource_group_name
law_sku = var.law_sku
retention_in_days = var.law_retention_in_days
tags = var.tags
depends_on = [
module.naming_standard
]
}
If I try to check a Terraform plan in JSON format instead of the main.tf, I get the same warning.
If I execute the same pipeline on a Terraform directory without modules, it works; the problem occurs when I run Checkov against local modules or against the Terraform plan output that calls local modules. That case is not in this example, but I tested it too.
This is my yml pipeline stage:
pool:
vmImage: ubuntu-latest
stages:
- stage: "runCheckov"
displayName: "Checkov - Scan Terraform files"
jobs:
- job: "runCheckov"
displayName: "Checkov > Pull, run and publish results of Checkov scan"
steps:
- bash: |
docker pull bridgecrew/checkov
workingDirectory: "./PoC/platform"
displayName: "Pull > bridgecrew/checkov"
- bash: |
LOG_LEVEL=DEBUG;docker run --volume $(pwd):/tf bridgecrew/checkov --directory /tf --framework terraform --output junitxml --soft-fail > $(pwd)/CheckovReport.xml
workingDirectory: "./PoC/platform"
displayName: "Run > checkov"
- script: |
echo "=====================List all the files of main folder============================"
cd ./PoC/platform
pwd
ls -ltra
- task: PublishTestResults@2
inputs:
testRunTitle: "Checkov Results"
failTaskOnFailedTests: true
testResultsFormat: "JUnit"
testResultsFiles: "CheckovReport.xml"
searchFolder: "./PoC/platform"
displayName: "Publish > Checkov scan results"
mergeTestResults: false
publishRunAttachments: true
</issue>
<code>
[start of checkov/common/util/stopit/utils.py]
1 # -*- coding: utf-8 -*-
2 """
3 ============
4 stopit.utils
5 ============
6
7 Misc utilities and common resources
8 """
9
10 import functools
11 import logging
12 import sys
13
14 # Custom logger
15 LOG = logging.getLogger(name='stopit')
16
17 if sys.version_info < (2, 7):
18 class NullHandler(logging.Handler):
19 """Copied from Python 2.7 to avoid getting `No handlers could be found
20 for logger "xxx"` http://bugs.python.org/issue16539
21 """
22 def handle(self, record):
23 pass
24
25 def emit(self, record):
26 pass
27
28 def createLock(self):
29 self.lock = None # noqa
30 else:
31 from logging import NullHandler
32
33 LOG.addHandler(NullHandler())
34
35
36 class TimeoutException(Exception):
37 """Raised when the block under context management takes longer to complete
38 than the allowed maximum timeout value.
39 """
40 pass
41
42
43 class BaseTimeout(object):
44 """Context manager for limiting in the time the execution of a block
45
46 :param seconds: ``float`` or ``int`` duration enabled to run the context
47 manager block
48 :param swallow_exc: ``False`` if you want to manage the
49 ``TimeoutException`` (or any other) in an outer ``try ... except``
50 structure. ``True`` (default) if you just want to check the execution of
51 the block with the ``state`` attribute of the context manager.
52 """
53
54 def __init__(self, seconds, swallow_exc=True):
55
56 # Possible values for the ``state`` attribute, self explanative
57 self.EXECUTED, self.EXECUTING, self.TIMED_OUT, self.INTERRUPTED, self.CANCELED = range(5)
58
59 self.seconds = seconds
60 self.swallow_exc = swallow_exc
61 self.state = self.EXECUTED
62
63 def __bool__(self):
64 return self.state in (self.EXECUTED, self.EXECUTING, self.CANCELED)
65
66 def __repr__(self):
67 """Debug helper
68 """
69 return "<{0} in state: {1}>".format(self.__class__.__name__, self.state)
70
71 def __enter__(self):
72 self.state = self.EXECUTING
73 self.setup_interrupt()
74 return self
75
76 def __exit__(self, exc_type, exc_val, exc_tb):
77 if exc_type is TimeoutException:
78 if self.state != self.TIMED_OUT:
79 self.state = self.INTERRUPTED
80 self.suppress_interrupt()
81 LOG.warning("Code block execution exceeded {0} seconds timeout".format(self.seconds),
82 exc_info=(exc_type, exc_val, exc_tb))
83 return self.swallow_exc
84 else:
85 if exc_type is None:
86 self.state = self.EXECUTED
87 self.suppress_interrupt()
88 return False
89
90 def cancel(self):
91 """In case in the block you realize you don't need anymore
92 limitation"""
93 self.state = self.CANCELED
94 self.suppress_interrupt()
95
96 # Methods must be provided by subclasses
97 def suppress_interrupt(self):
98 """Removes/neutralizes the feature that interrupts the executed block
99 """
100 raise NotImplementedError
101
102 def setup_interrupt(self):
103 """Installs/initializes the feature that interrupts the executed block
104 """
105 raise NotImplementedError
106
107
108 class base_timeoutable(object):
109 """A base for function or method decorator that raises a ``TimeoutException`` to
110 decorated functions that should not last a certain amount of time.
111
112 Any decorated callable may receive a ``timeout`` optional parameter that
113 specifies the number of seconds allocated to the callable execution.
114
115 The decorated functions that exceed that timeout return ``None`` or the
116 value provided by the decorator.
117
118 :param default: The default value in case we timed out during the decorated
119 function execution. Default is None.
120
121 :param timeout_param: As adding dynamically a ``timeout`` named parameter
122 to the decorated callable may conflict with the callable signature, you
123 may choose another name to provide that parameter. Your decoration line
124 could look like ``@timeoutable(timeout_param='my_timeout')``
125
126 .. note::
127
128 This is a base class that must be subclassed. subclasses must override
129 thz ``to_ctx_mgr`` with a timeout context manager class which in turn
130 must subclasses of above ``BaseTimeout`` class.
131 """
132
133 def __init__(self, default=None, timeout_param='timeout'):
134 self.to_ctx_mgr = None
135 self.default, self.timeout_param = default, timeout_param
136
137 def __call__(self, func):
138 @functools.wraps(func)
139 def wrapper(*args, **kwargs):
140 timeout = kwargs.pop(self.timeout_param, None)
141 if timeout:
142 with self.to_ctx_mgr(timeout, swallow_exc=True):
143 result = self.default # noqa
144 # ``result`` may not be assigned below in case of timeout
145 result = func(*args, **kwargs)
146 return result
147 else:
148 return func(*args, **kwargs)
149 return wrapper
150
[end of checkov/common/util/stopit/utils.py]
[start of checkov/common/util/stopit/__init__.py]
1 # -*- coding: utf-8 -*-
2 """
3 ======
4 stopit
5 ======
6
7 code from:
8 https://github.com/glenfant/stopit
9 package:
10 https://pypi.org/project/stopit
11
12 Public resources from ``stopit``
13 """
14
15 import pkg_resources
16
17 from .utils import LOG, TimeoutException
18 from .threadstop import ThreadingTimeout, async_raise, threading_timeoutable
19 from .signalstop import SignalTimeout, signal_timeoutable
20
21 # PEP 396 style version marker
22 try:
23 __version__ = pkg_resources.get_distribution(__name__).version
24 except Exception:
25 LOG.warning("Could not get the package version from pkg_resources")
26 __version__ = 'unknown'
27
28 __all__ = (
29 'ThreadingTimeout', 'async_raise', 'threading_timeoutable',
30 'SignalTimeout', 'signal_timeoutable', 'TimeoutException'
31 )
32
[end of checkov/common/util/stopit/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/checkov/common/util/stopit/__init__.py b/checkov/common/util/stopit/__init__.py
--- a/checkov/common/util/stopit/__init__.py
+++ b/checkov/common/util/stopit/__init__.py
@@ -12,18 +12,10 @@
Public resources from ``stopit``
"""
-import pkg_resources
-
-from .utils import LOG, TimeoutException
+from .utils import TimeoutException
from .threadstop import ThreadingTimeout, async_raise, threading_timeoutable
from .signalstop import SignalTimeout, signal_timeoutable
-# PEP 396 style version marker
-try:
- __version__ = pkg_resources.get_distribution(__name__).version
-except Exception:
- LOG.warning("Could not get the package version from pkg_resources")
- __version__ = 'unknown'
__all__ = (
'ThreadingTimeout', 'async_raise', 'threading_timeoutable',
diff --git a/checkov/common/util/stopit/utils.py b/checkov/common/util/stopit/utils.py
--- a/checkov/common/util/stopit/utils.py
+++ b/checkov/common/util/stopit/utils.py
@@ -26,7 +26,7 @@
pass
def createLock(self):
- self.lock = None # noqa
+ self.lock = None
else:
from logging import NullHandler
@@ -140,7 +140,7 @@
timeout = kwargs.pop(self.timeout_param, None)
if timeout:
with self.to_ctx_mgr(timeout, swallow_exc=True):
- result = self.default # noqa
+ result = self.default
# ``result`` may not be assigned below in case of timeout
result = func(*args, **kwargs)
return result
|
{"golden_diff": "diff --git a/checkov/common/util/stopit/__init__.py b/checkov/common/util/stopit/__init__.py\n--- a/checkov/common/util/stopit/__init__.py\n+++ b/checkov/common/util/stopit/__init__.py\n@@ -12,18 +12,10 @@\n Public resources from ``stopit``\n \"\"\"\n \n-import pkg_resources\n-\n-from .utils import LOG, TimeoutException\n+from .utils import TimeoutException\n from .threadstop import ThreadingTimeout, async_raise, threading_timeoutable\n from .signalstop import SignalTimeout, signal_timeoutable\n \n-# PEP 396 style version marker\n-try:\n- __version__ = pkg_resources.get_distribution(__name__).version\n-except Exception:\n- LOG.warning(\"Could not get the package version from pkg_resources\")\n- __version__ = 'unknown'\n \n __all__ = (\n 'ThreadingTimeout', 'async_raise', 'threading_timeoutable',\ndiff --git a/checkov/common/util/stopit/utils.py b/checkov/common/util/stopit/utils.py\n--- a/checkov/common/util/stopit/utils.py\n+++ b/checkov/common/util/stopit/utils.py\n@@ -26,7 +26,7 @@\n pass\n \n def createLock(self):\n- self.lock = None # noqa\n+ self.lock = None\n else:\n from logging import NullHandler\n \n@@ -140,7 +140,7 @@\n timeout = kwargs.pop(self.timeout_param, None)\n if timeout:\n with self.to_ctx_mgr(timeout, swallow_exc=True):\n- result = self.default # noqa\n+ result = self.default\n # ``result`` may not be assigned below in case of timeout\n result = func(*args, **kwargs)\n return result\n", "issue": " [WARNI] Could not get the package version from pkg_resource\nI execute a ADO pipeline that checks a terraform directory with subdirectories that have 4 modules locally.\r\n\r\n\r\n\r\n\r\nCheckov version:\r\ndocker.io/bridgecrew/checkov:latest\r\n\r\nPath of the Terraform maint.tf and directory that checks chekov: PoC\\platform\\main.tf\r\n\r\n\r\nExecution:\r\nLOG_LEVEL=DEBUG;docker run --volume $(pwd):/tf bridgecrew/checkov --directory /tf --framework terraform --output junitxml --soft-fail > $(pwd)/CheckovReport.xml\r\nResult:\r\n2023-02-22 12:23:07,252 [MainThread ] [WARNI] Could not get the package version from pkg_resources\r\n\r\n\r\n\r\nI did a list of the files and I can see the checkovReport.xml but it doesn't have enough data to be published:\r\nNo Result Found to Publish '/home/vsts/work/1/s/PoC/platform/CheckovReport.xml'.\r\n\r\n\r\n=====================List all the files of main folder============================\r\n/home/vsts/work/1/s/PoC/platform\r\ntotal 40\r\n-rw-r--r-- 1 vsts docker 3728 Feb 22 12:22 main.tf\r\ndrwxr-xr-x 10 vsts docker 4096 Feb 22 12:22 azure-tfmodule\r\n-rw-r--r-- 1 vsts docker 2563 Feb 22 12:22 README.md\r\n-rw-r--r-- 1 vsts docker 962 Feb 22 12:22 .gitignore\r\ndrwxr-xr-x 3 vsts docker 4096 Feb 22 12:22 ..\r\n-rw-r--r-- 1 vsts docker 1256 Feb 22 12:22 versions.tf\r\n-rw-r--r-- 1 vsts docker 1477 Feb 22 12:22 variables.tf\r\n-rw-r--r-- 1 vsts docker 1292 Feb 22 12:22 terraform.tfvars\r\ndrwxr-xr-x 3 vsts docker 4096 Feb 22 12:23 .\r\n-rw-r--r-- 1 vsts docker 2191 Feb 22 12:23 CheckovReport.xml\r\n\r\n\r\nThis is an example of one of the modules with the WARNI, on the main.tf, where workingDirectory: \"./PoC/platform\"\r\nmodule \"azurerm_log_analytics_workspace\" {\r\n source = \"./azure-tfmodule/azure-loganalytics-workspace\"\r\n law_name = module.naming_standard.standard.log-analytics-workspace\r\n location = var.location\r\n resource_group_name = var.resource_group_name\r\n law_sku = var.law_sku\r\n retention_in_days = var.law_retention_in_days\r\n tags = var.tags \r\n\r\n depends_on = [\r\n 
module.naming_standard\r\n ]\r\n}\r\n\r\nIf I try to check a terraform plan in json format not the main.tf I get the same warning. \r\nIf I execute same pipeline in the terraform directory without modules it works, the problem is when I try to execute the checkov and review a local modules or the terraform plan output that calls local modules. Is not on this example but I tested too.\r\n\r\n\r\nThis is my yml pipeline stage:\r\npool:\r\n vmImage: ubuntu-latest\r\n\r\nstages:\r\n- stage: \"runCheckov\"\r\n displayName: \"Checkov - Scan Terraform files\"\r\n jobs:\r\n - job: \"runCheckov\"\r\n displayName: \"Checkov > Pull, run and publish results of Checkov scan\"\r\n steps:\r\n - bash: |\r\n docker pull bridgecrew/checkov\r\n workingDirectory: \"./PoC/platform\"\r\n displayName: \"Pull > bridgecrew/checkov\"\r\n - bash: |\r\n LOG_LEVEL=DEBUG;docker run --volume $(pwd):/tf bridgecrew/checkov --directory /tf --framework terraform --output junitxml --soft-fail > $(pwd)/CheckovReport.xml\r\n workingDirectory: \"./PoC/platform\"\r\n displayName: \"Run > checkov\"\r\n - script: |\r\n echo \"=====================List all the files of main folder============================\"\r\n cd ./PoC/platform\r\n pwd\r\n ls -ltra\r\n - task: PublishTestResults@2\r\n inputs:\r\n testRunTitle: \"Checkov Results\"\r\n failTaskOnFailedTests: true\r\n testResultsFormat: \"JUnit\"\r\n testResultsFiles: \"CheckovReport.xml\"\r\n searchFolder: \"./PoC/platform\"\r\n displayName: \"Publish > Checkov scan results\"\r\n mergeTestResults: false\r\n publishRunAttachments: true\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n============\nstopit.utils\n============\n\nMisc utilities and common resources\n\"\"\"\n\nimport functools\nimport logging\nimport sys\n\n# Custom logger\nLOG = logging.getLogger(name='stopit')\n\nif sys.version_info < (2, 7):\n class NullHandler(logging.Handler):\n \"\"\"Copied from Python 2.7 to avoid getting `No handlers could be found\n for logger \"xxx\"` http://bugs.python.org/issue16539\n \"\"\"\n def handle(self, record):\n pass\n\n def emit(self, record):\n pass\n\n def createLock(self):\n self.lock = None # noqa\nelse:\n from logging import NullHandler\n\nLOG.addHandler(NullHandler())\n\n\nclass TimeoutException(Exception):\n \"\"\"Raised when the block under context management takes longer to complete\n than the allowed maximum timeout value.\n \"\"\"\n pass\n\n\nclass BaseTimeout(object):\n \"\"\"Context manager for limiting in the time the execution of a block\n\n :param seconds: ``float`` or ``int`` duration enabled to run the context\n manager block\n :param swallow_exc: ``False`` if you want to manage the\n ``TimeoutException`` (or any other) in an outer ``try ... except``\n structure. 
``True`` (default) if you just want to check the execution of\n the block with the ``state`` attribute of the context manager.\n \"\"\"\n\n def __init__(self, seconds, swallow_exc=True):\n\n # Possible values for the ``state`` attribute, self explanative\n self.EXECUTED, self.EXECUTING, self.TIMED_OUT, self.INTERRUPTED, self.CANCELED = range(5)\n\n self.seconds = seconds\n self.swallow_exc = swallow_exc\n self.state = self.EXECUTED\n\n def __bool__(self):\n return self.state in (self.EXECUTED, self.EXECUTING, self.CANCELED)\n\n def __repr__(self):\n \"\"\"Debug helper\n \"\"\"\n return \"<{0} in state: {1}>\".format(self.__class__.__name__, self.state)\n\n def __enter__(self):\n self.state = self.EXECUTING\n self.setup_interrupt()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if exc_type is TimeoutException:\n if self.state != self.TIMED_OUT:\n self.state = self.INTERRUPTED\n self.suppress_interrupt()\n LOG.warning(\"Code block execution exceeded {0} seconds timeout\".format(self.seconds),\n exc_info=(exc_type, exc_val, exc_tb))\n return self.swallow_exc\n else:\n if exc_type is None:\n self.state = self.EXECUTED\n self.suppress_interrupt()\n return False\n\n def cancel(self):\n \"\"\"In case in the block you realize you don't need anymore\n limitation\"\"\"\n self.state = self.CANCELED\n self.suppress_interrupt()\n\n # Methods must be provided by subclasses\n def suppress_interrupt(self):\n \"\"\"Removes/neutralizes the feature that interrupts the executed block\n \"\"\"\n raise NotImplementedError\n\n def setup_interrupt(self):\n \"\"\"Installs/initializes the feature that interrupts the executed block\n \"\"\"\n raise NotImplementedError\n\n\nclass base_timeoutable(object):\n \"\"\"A base for function or method decorator that raises a ``TimeoutException`` to\n decorated functions that should not last a certain amount of time.\n\n Any decorated callable may receive a ``timeout`` optional parameter that\n specifies the number of seconds allocated to the callable execution.\n\n The decorated functions that exceed that timeout return ``None`` or the\n value provided by the decorator.\n\n :param default: The default value in case we timed out during the decorated\n function execution. Default is None.\n\n :param timeout_param: As adding dynamically a ``timeout`` named parameter\n to the decorated callable may conflict with the callable signature, you\n may choose another name to provide that parameter. Your decoration line\n could look like ``@timeoutable(timeout_param='my_timeout')``\n\n .. note::\n\n This is a base class that must be subclassed. 
subclasses must override\n thz ``to_ctx_mgr`` with a timeout context manager class which in turn\n must subclasses of above ``BaseTimeout`` class.\n \"\"\"\n\n def __init__(self, default=None, timeout_param='timeout'):\n self.to_ctx_mgr = None\n self.default, self.timeout_param = default, timeout_param\n\n def __call__(self, func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n timeout = kwargs.pop(self.timeout_param, None)\n if timeout:\n with self.to_ctx_mgr(timeout, swallow_exc=True):\n result = self.default # noqa\n # ``result`` may not be assigned below in case of timeout\n result = func(*args, **kwargs)\n return result\n else:\n return func(*args, **kwargs)\n return wrapper\n", "path": "checkov/common/util/stopit/utils.py"}, {"content": "# -*- coding: utf-8 -*-\n\"\"\"\n======\nstopit\n======\n\ncode from:\nhttps://github.com/glenfant/stopit\npackage:\nhttps://pypi.org/project/stopit\n\nPublic resources from ``stopit``\n\"\"\"\n\nimport pkg_resources\n\nfrom .utils import LOG, TimeoutException\nfrom .threadstop import ThreadingTimeout, async_raise, threading_timeoutable\nfrom .signalstop import SignalTimeout, signal_timeoutable\n\n# PEP 396 style version marker\ntry:\n __version__ = pkg_resources.get_distribution(__name__).version\nexcept Exception:\n LOG.warning(\"Could not get the package version from pkg_resources\")\n __version__ = 'unknown'\n\n__all__ = (\n 'ThreadingTimeout', 'async_raise', 'threading_timeoutable',\n 'SignalTimeout', 'signal_timeoutable', 'TimeoutException'\n)\n", "path": "checkov/common/util/stopit/__init__.py"}]}
| 3,356 | 391 |
gh_patches_debug_6017
|
rasdani/github-patches
|
git_diff
|
geopandas__geopandas-2289
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TST: test_value_counts breaking against pandas latest
Recent CI builds on 38-dev.yaml are all failing due to geopandas/tests/test_pandas_methods.py::test_value_counts, which fails with the output:
```
______________________________ test_value_counts _______________________________
3536
[gw0] linux -- Python 3.8.12 /usr/share/miniconda3/envs/test/bin/python
3537
3538
def test_value_counts():
3539
# each object is considered unique
3540
s = GeoSeries([Point(0, 0), Point(1, 1), Point(0, 0)])
3541
res = s.value_counts()
3542
with compat.ignore_shapely2_warnings():
3543
exp = pd.Series([2, 1], index=[Point(0, 0), Point(1, 1)])
3544
> assert_series_equal(res, exp)
3545
3546
geopandas/tests/test_pandas_methods.py:454:
3547
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
3548
3549
left = Index([POINT (0 0), POINT (1 1)], dtype='geometry')
3550
right = Index([POINT (0 0), POINT (1 1)], dtype='object'), obj = 'Series.index'
3551
3552
def _check_types(left, right, obj="Index") -> None:
3553
if not exact:
3554
return
3555
3556
assert_class_equal(left, right, exact=exact, obj=obj)
3557
> assert_attr_equal("inferred_type", left, right, obj=obj)
3558
E AssertionError: Series.index are different
3559
E
3560
E Attribute "inferred_type" are different
3561
E [left]: unknown-array
3562
E [right]: mixed
```
It looks like this pandas change (https://github.com/pandas-dev/pandas/pull/43930) to allow storing extension arrays in the index (that is, not just casting everything to `object`) is responsible.
</issue>
<code>
[start of geopandas/_compat.py]
1 import contextlib
2 from distutils.version import LooseVersion
3 import importlib
4 import os
5 import warnings
6
7 import numpy as np
8 import pandas as pd
9 import pyproj
10 import shapely
11 import shapely.geos
12
13
14 # -----------------------------------------------------------------------------
15 # pandas compat
16 # -----------------------------------------------------------------------------
17
18 PANDAS_GE_10 = str(pd.__version__) >= LooseVersion("1.0.0")
19 PANDAS_GE_11 = str(pd.__version__) >= LooseVersion("1.1.0")
20 PANDAS_GE_115 = str(pd.__version__) >= LooseVersion("1.1.5")
21 PANDAS_GE_12 = str(pd.__version__) >= LooseVersion("1.2.0")
22
23
24 # -----------------------------------------------------------------------------
25 # Shapely / PyGEOS compat
26 # -----------------------------------------------------------------------------
27
28
29 SHAPELY_GE_17 = str(shapely.__version__) >= LooseVersion("1.7.0")
30 SHAPELY_GE_18 = str(shapely.__version__) >= LooseVersion("1.8")
31 SHAPELY_GE_20 = str(shapely.__version__) >= LooseVersion("2.0")
32
33 GEOS_GE_390 = shapely.geos.geos_version >= (3, 9, 0)
34
35
36 HAS_PYGEOS = None
37 USE_PYGEOS = None
38 PYGEOS_SHAPELY_COMPAT = None
39
40 PYGEOS_GE_09 = None
41 PYGEOS_GE_010 = None
42
43 INSTALL_PYGEOS_ERROR = "To use PyGEOS within GeoPandas, you need to install PyGEOS: \
44 'conda install pygeos' or 'pip install pygeos'"
45
46 try:
47 import pygeos # noqa
48
49 # only automatically use pygeos if version is high enough
50 if str(pygeos.__version__) >= LooseVersion("0.8"):
51 HAS_PYGEOS = True
52 PYGEOS_GE_09 = str(pygeos.__version__) >= LooseVersion("0.9")
53 PYGEOS_GE_010 = str(pygeos.__version__) >= LooseVersion("0.10")
54 else:
55 warnings.warn(
56 "The installed version of PyGEOS is too old ({0} installed, 0.8 required),"
57 " and thus GeoPandas will not use PyGEOS.".format(pygeos.__version__),
58 UserWarning,
59 )
60 HAS_PYGEOS = False
61 except ImportError:
62 HAS_PYGEOS = False
63
64
65 def set_use_pygeos(val=None):
66 """
67 Set the global configuration on whether to use PyGEOS or not.
68
69 The default is use PyGEOS if it is installed. This can be overridden
70 with an environment variable USE_PYGEOS (this is only checked at
71 first import, cannot be changed during interactive session).
72
73 Alternatively, pass a value here to force a True/False value.
74 """
75 global USE_PYGEOS
76 global PYGEOS_SHAPELY_COMPAT
77
78 if val is not None:
79 USE_PYGEOS = bool(val)
80 else:
81 if USE_PYGEOS is None:
82
83 USE_PYGEOS = HAS_PYGEOS
84
85 env_use_pygeos = os.getenv("USE_PYGEOS", None)
86 if env_use_pygeos is not None:
87 USE_PYGEOS = bool(int(env_use_pygeos))
88
89 # validate the pygeos version
90 if USE_PYGEOS:
91 try:
92 import pygeos # noqa
93
94 # validate the pygeos version
95 if not str(pygeos.__version__) >= LooseVersion("0.8"):
96 raise ImportError(
97 "PyGEOS >= 0.8 is required, version {0} is installed".format(
98 pygeos.__version__
99 )
100 )
101
102 # Check whether Shapely and PyGEOS use the same GEOS version.
103 # Based on PyGEOS from_shapely implementation.
104
105 from shapely.geos import geos_version_string as shapely_geos_version
106 from pygeos import geos_capi_version_string
107
108 # shapely has something like: "3.6.2-CAPI-1.10.2 4d2925d6"
109 # pygeos has something like: "3.6.2-CAPI-1.10.2"
110 if not shapely_geos_version.startswith(geos_capi_version_string):
111 warnings.warn(
112 "The Shapely GEOS version ({}) is incompatible with the GEOS "
113 "version PyGEOS was compiled with ({}). Conversions between both "
114 "will be slow.".format(
115 shapely_geos_version, geos_capi_version_string
116 )
117 )
118 PYGEOS_SHAPELY_COMPAT = False
119 else:
120 PYGEOS_SHAPELY_COMPAT = True
121
122 except ImportError:
123 raise ImportError(INSTALL_PYGEOS_ERROR)
124
125
126 set_use_pygeos()
127
128
129 # compat related to deprecation warnings introduced in Shapely 1.8
130 # -> creating a numpy array from a list-like of Multi-part geometries,
131 # although doing the correct thing (not expanding in its parts), still raises
132 # the warning about iteration being deprecated
133 # This adds a context manager to explicitly ignore this warning
134
135
136 try:
137 from shapely.errors import ShapelyDeprecationWarning as shapely_warning
138 except ImportError:
139 shapely_warning = None
140
141
142 if shapely_warning is not None and not SHAPELY_GE_20:
143
144 @contextlib.contextmanager
145 def ignore_shapely2_warnings():
146 with warnings.catch_warnings():
147 warnings.filterwarnings(
148 "ignore", "Iteration|The array interface|__len__", shapely_warning
149 )
150 yield
151
152
153 elif (str(np.__version__) >= LooseVersion("1.21")) and not SHAPELY_GE_20:
154
155 @contextlib.contextmanager
156 def ignore_shapely2_warnings():
157 with warnings.catch_warnings():
158 # warning from numpy for existing Shapely releases (this is fixed
159 # with Shapely 1.8)
160 warnings.filterwarnings(
161 "ignore", "An exception was ignored while fetching", DeprecationWarning
162 )
163 yield
164
165
166 else:
167
168 @contextlib.contextmanager
169 def ignore_shapely2_warnings():
170 yield
171
172
173 def import_optional_dependency(name: str, extra: str = ""):
174 """
175 Import an optional dependency.
176
177 Adapted from pandas.compat._optional::import_optional_dependency
178
179 Raises a formatted ImportError if the module is not present.
180
181 Parameters
182 ----------
183 name : str
184 The module name.
185 extra : str
186 Additional text to include in the ImportError message.
187 Returns
188 -------
189 module
190 """
191 msg = """Missing optional dependency '{name}'. {extra} "
192 "Use pip or conda to install {name}.""".format(
193 name=name, extra=extra
194 )
195
196 if not isinstance(name, str):
197 raise ValueError(
198 "Invalid module name: '{name}'; must be a string".format(name=name)
199 )
200
201 try:
202 module = importlib.import_module(name)
203
204 except ImportError:
205 raise ImportError(msg) from None
206
207 return module
208
209
210 # -----------------------------------------------------------------------------
211 # RTree compat
212 # -----------------------------------------------------------------------------
213
214 HAS_RTREE = None
215 RTREE_GE_094 = False
216 try:
217 import rtree # noqa
218
219 HAS_RTREE = True
220 except ImportError:
221 HAS_RTREE = False
222
223 # -----------------------------------------------------------------------------
224 # pyproj compat
225 # -----------------------------------------------------------------------------
226
227 PYPROJ_LT_3 = LooseVersion(pyproj.__version__) < LooseVersion("3")
228 PYPROJ_GE_31 = LooseVersion(pyproj.__version__) >= LooseVersion("3.1")
229
[end of geopandas/_compat.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/geopandas/_compat.py b/geopandas/_compat.py
--- a/geopandas/_compat.py
+++ b/geopandas/_compat.py
@@ -19,6 +19,7 @@
PANDAS_GE_11 = str(pd.__version__) >= LooseVersion("1.1.0")
PANDAS_GE_115 = str(pd.__version__) >= LooseVersion("1.1.5")
PANDAS_GE_12 = str(pd.__version__) >= LooseVersion("1.2.0")
+PANDAS_GE_14 = str(pd.__version__) >= LooseVersion("1.4.0")
# -----------------------------------------------------------------------------
|
{"golden_diff": "diff --git a/geopandas/_compat.py b/geopandas/_compat.py\n--- a/geopandas/_compat.py\n+++ b/geopandas/_compat.py\n@@ -19,6 +19,7 @@\n PANDAS_GE_11 = str(pd.__version__) >= LooseVersion(\"1.1.0\")\n PANDAS_GE_115 = str(pd.__version__) >= LooseVersion(\"1.1.5\")\n PANDAS_GE_12 = str(pd.__version__) >= LooseVersion(\"1.2.0\")\n+PANDAS_GE_14 = str(pd.__version__) >= LooseVersion(\"1.4.0\")\n \n \n # -----------------------------------------------------------------------------\n", "issue": "TST: test_value_counts breaking against pandas latest\nRecent CI builds on 38-dev.yaml are all failing due to geopandas/tests/test_pandas_methods.py::test_value_counts, which fails with the output:\r\n```\r\n______________________________ test_value_counts _______________________________\r\n3536\r\n[gw0] linux -- Python 3.8.12 /usr/share/miniconda3/envs/test/bin/python\r\n3537\r\n\r\n3538\r\n def test_value_counts():\r\n3539\r\n # each object is considered unique\r\n3540\r\n s = GeoSeries([Point(0, 0), Point(1, 1), Point(0, 0)])\r\n3541\r\n res = s.value_counts()\r\n3542\r\n with compat.ignore_shapely2_warnings():\r\n3543\r\n exp = pd.Series([2, 1], index=[Point(0, 0), Point(1, 1)])\r\n3544\r\n> assert_series_equal(res, exp)\r\n3545\r\n\r\n3546\r\ngeopandas/tests/test_pandas_methods.py:454: \r\n3547\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n3548\r\n\r\n3549\r\nleft = Index([POINT (0 0), POINT (1 1)], dtype='geometry')\r\n3550\r\nright = Index([POINT (0 0), POINT (1 1)], dtype='object'), obj = 'Series.index'\r\n3551\r\n\r\n3552\r\n def _check_types(left, right, obj=\"Index\") -> None:\r\n3553\r\n if not exact:\r\n3554\r\n return\r\n3555\r\n \r\n3556\r\n assert_class_equal(left, right, exact=exact, obj=obj)\r\n3557\r\n> assert_attr_equal(\"inferred_type\", left, right, obj=obj)\r\n3558\r\nE AssertionError: Series.index are different\r\n3559\r\nE \r\n3560\r\nE Attribute \"inferred_type\" are different\r\n3561\r\nE [left]: unknown-array\r\n3562\r\nE [right]: mixed\r\n```\r\n\r\nIt looks like this pandas change (https://github.com/pandas-dev/pandas/pull/43930) to allow storing extension arrays in the index (that is, not just casting everything to `object`) is responsible.\n", "before_files": [{"content": "import contextlib\nfrom distutils.version import LooseVersion\nimport importlib\nimport os\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nimport pyproj\nimport shapely\nimport shapely.geos\n\n\n# -----------------------------------------------------------------------------\n# pandas compat\n# -----------------------------------------------------------------------------\n\nPANDAS_GE_10 = str(pd.__version__) >= LooseVersion(\"1.0.0\")\nPANDAS_GE_11 = str(pd.__version__) >= LooseVersion(\"1.1.0\")\nPANDAS_GE_115 = str(pd.__version__) >= LooseVersion(\"1.1.5\")\nPANDAS_GE_12 = str(pd.__version__) >= LooseVersion(\"1.2.0\")\n\n\n# -----------------------------------------------------------------------------\n# Shapely / PyGEOS compat\n# -----------------------------------------------------------------------------\n\n\nSHAPELY_GE_17 = str(shapely.__version__) >= LooseVersion(\"1.7.0\")\nSHAPELY_GE_18 = str(shapely.__version__) >= LooseVersion(\"1.8\")\nSHAPELY_GE_20 = str(shapely.__version__) >= LooseVersion(\"2.0\")\n\nGEOS_GE_390 = shapely.geos.geos_version >= (3, 9, 0)\n\n\nHAS_PYGEOS = None\nUSE_PYGEOS = None\nPYGEOS_SHAPELY_COMPAT = None\n\nPYGEOS_GE_09 = None\nPYGEOS_GE_010 = None\n\nINSTALL_PYGEOS_ERROR = \"To use PyGEOS within 
GeoPandas, you need to install PyGEOS: \\\n'conda install pygeos' or 'pip install pygeos'\"\n\ntry:\n import pygeos # noqa\n\n # only automatically use pygeos if version is high enough\n if str(pygeos.__version__) >= LooseVersion(\"0.8\"):\n HAS_PYGEOS = True\n PYGEOS_GE_09 = str(pygeos.__version__) >= LooseVersion(\"0.9\")\n PYGEOS_GE_010 = str(pygeos.__version__) >= LooseVersion(\"0.10\")\n else:\n warnings.warn(\n \"The installed version of PyGEOS is too old ({0} installed, 0.8 required),\"\n \" and thus GeoPandas will not use PyGEOS.\".format(pygeos.__version__),\n UserWarning,\n )\n HAS_PYGEOS = False\nexcept ImportError:\n HAS_PYGEOS = False\n\n\ndef set_use_pygeos(val=None):\n \"\"\"\n Set the global configuration on whether to use PyGEOS or not.\n\n The default is use PyGEOS if it is installed. This can be overridden\n with an environment variable USE_PYGEOS (this is only checked at\n first import, cannot be changed during interactive session).\n\n Alternatively, pass a value here to force a True/False value.\n \"\"\"\n global USE_PYGEOS\n global PYGEOS_SHAPELY_COMPAT\n\n if val is not None:\n USE_PYGEOS = bool(val)\n else:\n if USE_PYGEOS is None:\n\n USE_PYGEOS = HAS_PYGEOS\n\n env_use_pygeos = os.getenv(\"USE_PYGEOS\", None)\n if env_use_pygeos is not None:\n USE_PYGEOS = bool(int(env_use_pygeos))\n\n # validate the pygeos version\n if USE_PYGEOS:\n try:\n import pygeos # noqa\n\n # validate the pygeos version\n if not str(pygeos.__version__) >= LooseVersion(\"0.8\"):\n raise ImportError(\n \"PyGEOS >= 0.8 is required, version {0} is installed\".format(\n pygeos.__version__\n )\n )\n\n # Check whether Shapely and PyGEOS use the same GEOS version.\n # Based on PyGEOS from_shapely implementation.\n\n from shapely.geos import geos_version_string as shapely_geos_version\n from pygeos import geos_capi_version_string\n\n # shapely has something like: \"3.6.2-CAPI-1.10.2 4d2925d6\"\n # pygeos has something like: \"3.6.2-CAPI-1.10.2\"\n if not shapely_geos_version.startswith(geos_capi_version_string):\n warnings.warn(\n \"The Shapely GEOS version ({}) is incompatible with the GEOS \"\n \"version PyGEOS was compiled with ({}). 
Conversions between both \"\n \"will be slow.\".format(\n shapely_geos_version, geos_capi_version_string\n )\n )\n PYGEOS_SHAPELY_COMPAT = False\n else:\n PYGEOS_SHAPELY_COMPAT = True\n\n except ImportError:\n raise ImportError(INSTALL_PYGEOS_ERROR)\n\n\nset_use_pygeos()\n\n\n# compat related to deprecation warnings introduced in Shapely 1.8\n# -> creating a numpy array from a list-like of Multi-part geometries,\n# although doing the correct thing (not expanding in its parts), still raises\n# the warning about iteration being deprecated\n# This adds a context manager to explicitly ignore this warning\n\n\ntry:\n from shapely.errors import ShapelyDeprecationWarning as shapely_warning\nexcept ImportError:\n shapely_warning = None\n\n\nif shapely_warning is not None and not SHAPELY_GE_20:\n\n @contextlib.contextmanager\n def ignore_shapely2_warnings():\n with warnings.catch_warnings():\n warnings.filterwarnings(\n \"ignore\", \"Iteration|The array interface|__len__\", shapely_warning\n )\n yield\n\n\nelif (str(np.__version__) >= LooseVersion(\"1.21\")) and not SHAPELY_GE_20:\n\n @contextlib.contextmanager\n def ignore_shapely2_warnings():\n with warnings.catch_warnings():\n # warning from numpy for existing Shapely releases (this is fixed\n # with Shapely 1.8)\n warnings.filterwarnings(\n \"ignore\", \"An exception was ignored while fetching\", DeprecationWarning\n )\n yield\n\n\nelse:\n\n @contextlib.contextmanager\n def ignore_shapely2_warnings():\n yield\n\n\ndef import_optional_dependency(name: str, extra: str = \"\"):\n \"\"\"\n Import an optional dependency.\n\n Adapted from pandas.compat._optional::import_optional_dependency\n\n Raises a formatted ImportError if the module is not present.\n\n Parameters\n ----------\n name : str\n The module name.\n extra : str\n Additional text to include in the ImportError message.\n Returns\n -------\n module\n \"\"\"\n msg = \"\"\"Missing optional dependency '{name}'. {extra} \"\n \"Use pip or conda to install {name}.\"\"\".format(\n name=name, extra=extra\n )\n\n if not isinstance(name, str):\n raise ValueError(\n \"Invalid module name: '{name}'; must be a string\".format(name=name)\n )\n\n try:\n module = importlib.import_module(name)\n\n except ImportError:\n raise ImportError(msg) from None\n\n return module\n\n\n# -----------------------------------------------------------------------------\n# RTree compat\n# -----------------------------------------------------------------------------\n\nHAS_RTREE = None\nRTREE_GE_094 = False\ntry:\n import rtree # noqa\n\n HAS_RTREE = True\nexcept ImportError:\n HAS_RTREE = False\n\n# -----------------------------------------------------------------------------\n# pyproj compat\n# -----------------------------------------------------------------------------\n\nPYPROJ_LT_3 = LooseVersion(pyproj.__version__) < LooseVersion(\"3\")\nPYPROJ_GE_31 = LooseVersion(pyproj.__version__) >= LooseVersion(\"3.1\")\n", "path": "geopandas/_compat.py"}]}
| 3,374 | 145 |
gh_patches_debug_11105
|
rasdani/github-patches
|
git_diff
|
pwndbg__pwndbg-130
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error in the `search` command
The `search` command appears to have some issues while searching signed integer data
For example while debugging a x86 program:
```
pwndbg> search -4 0xf7eebf83
Traceback (most recent call last):
File "/home/bruce30262/pwndbg/pwndbg/commands/__init__.py", line 57, in __call__
return self.function(*args, **kwargs)
File "/home/bruce30262/pwndbg/pwndbg/commands/__init__.py", line 162, in _ArgparsedCommand
return function(**vars(args))
File "/home/bruce30262/pwndbg/pwndbg/commands/__init__.py", line 115, in _OnlyWhenRunning
return function(*a, **kw)
File "/home/bruce30262/pwndbg/pwndbg/commands/search.py", line 112, in search
value = struct.pack(fmt, value)
struct.error: argument out of range
```
This is because `pwndbg.commands.fix_int(value)` returns `-135348349` instead of an unsigned integer, and the packing format in [search.py](https://github.com/pwndbg/pwndbg/blob/b1892b27741a478cd5361061b0b4dda9ef46d02e/pwndbg/commands/search.py#L106) only supports the unsigned data type, causing the error.
Commands like `search -4 -- -100` will also cause the same error.
Kind of curious why it only supports the unsigned data type though. It might need some additional checks for the appropriate packing format.
</issue>
<code>
[start of pwndbg/commands/search.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 from __future__ import absolute_import
4 from __future__ import division
5 from __future__ import print_function
6 from __future__ import unicode_literals
7
8 import argparse
9 import codecs
10 import os
11 import struct
12
13 import gdb
14
15 import pwndbg.color.memory as M
16 import pwndbg.commands
17 import pwndbg.config
18 import pwndbg.enhance
19 import pwndbg.search
20 import pwndbg.vmmap
21
22 saved = set()
23
24 def print_search_hit(address):
25 """Prints out a single search hit.
26
27 Arguments:
28 address(int): Address to print
29 """
30 if not address:
31 return
32
33 vmmap = pwndbg.vmmap.find(address)
34 if vmmap:
35 region = os.path.basename(vmmap.objfile)
36 else:
37 region = '[mapped]'
38
39 region = region.ljust(15)
40
41 region = M.get(address, region)
42 addr = M.get(address)
43 display = pwndbg.enhance.enhance(address)
44 print(region,addr,display)
45
46 auto_save = pwndbg.config.Parameter('auto-save-search', False,
47 'automatically pass --save to "search" command')
48
49 parser = argparse.ArgumentParser(description='''
50 Search memory for byte sequences, strings, pointers, and integer values
51 ''')
52 parser.add_argument('-t', '--type', choices=['byte','short','dword','qword','pointer','string','bytes'],
53 help='Size of search target', default='bytes', type=str)
54 parser.add_argument('-1', '--byte', dest='type', action='store_const', const='byte',
55 help='Search for a 1-byte integer')
56 parser.add_argument('-2', '--word', dest='type', action='store_const', const='word',
57 help='Search for a 2-byte integer')
58 parser.add_argument('-4', '--dword', dest='type', action='store_const', const='dword',
59 help='Search for a 4-byte integer')
60 parser.add_argument('-8', '--qword', dest='type', action='store_const', const='qword',
61 help='Search for an 8-byte integer')
62 parser.add_argument('-p', '--pointer', dest='type', action='store_const', const='pointer',
63 help='Search for a pointer-width integer')
64 parser.add_argument('-x', '--hex', action='store_true',
65 help='Target is a hex-encoded (for bytes/strings)')
66 parser.add_argument('-s', '--string', action='store_true',
67 help='Target is a raw string')
68 parser.add_argument('-e', '--executable', action='store_true',
69 help='Search executable segments only')
70 parser.add_argument('-w', '--writable', action='store_true',
71 help='Search writable segments only')
72 parser.add_argument('value', type=str,
73 help='Value to search for')
74 parser.add_argument('mapping', type=str, nargs='?', default=None,
75 help='Mapping to search [e.g. libc]')
76 parser.add_argument('--save', action='store_true', default=None,
77 help='Save results for --resume. Default comes from config %r' % auto_save.name)
78 parser.add_argument('--no-save', action='store_false', default=None, dest='save',
79 help='Invert --save')
80 parser.add_argument('-n', '--next', action='store_true',
81 help='Search only locations returned by previous search with --save')
82
83 @pwndbg.commands.ArgparsedCommand(parser)
84 @pwndbg.commands.OnlyWhenRunning
85 def search(type, hex, string, executable, writable, value, mapping, save, next):
86 # Adjust pointer sizes to the local architecture
87 if type == 'pointer':
88 type = {
89 4: 'dword',
90 8: 'qword'
91 }[pwndbg.arch.ptrsize]
92
93 if save is None:
94 save = bool(pwndbg.config.auto_save_search)
95
96 if hex:
97 value = codecs.decode(value, 'hex')
98
99 # Convert to an integer if needed, and pack to bytes
100 if type not in ('string', 'bytes'):
101 value = pwndbg.commands.fix_int(value)
102 fmt = {
103 'little': '<',
104 'big': '>'
105 }[pwndbg.arch.endian] + {
106 'byte': 'B',
107 'short': 'H',
108 'dword': 'L',
109 'qword': 'Q'
110 }[type]
111
112 value = struct.pack(fmt, value)
113
114 # Null-terminate strings
115 elif type == 'string':
116 value += b'\x00'
117
118 # Prep the saved set if necessary
119 global saved
120 if save:
121 saved = set()
122
123 # Perform the search
124 for address in pwndbg.search.search(value,
125 mapping=mapping,
126 executable=executable,
127 writable=writable):
128
129 if next and address not in saved:
130 continue
131
132 if save:
133 saved.add(address)
134
135 print_search_hit(address)
136
[end of pwndbg/commands/search.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pwndbg/commands/search.py b/pwndbg/commands/search.py
--- a/pwndbg/commands/search.py
+++ b/pwndbg/commands/search.py
@@ -12,6 +12,7 @@
import gdb
+import pwndbg.arch
import pwndbg.color.memory as M
import pwndbg.commands
import pwndbg.config
@@ -99,6 +100,7 @@
# Convert to an integer if needed, and pack to bytes
if type not in ('string', 'bytes'):
value = pwndbg.commands.fix_int(value)
+ value &= pwndbg.arch.ptrmask
fmt = {
'little': '<',
'big': '>'
|
{"golden_diff": "diff --git a/pwndbg/commands/search.py b/pwndbg/commands/search.py\n--- a/pwndbg/commands/search.py\n+++ b/pwndbg/commands/search.py\n@@ -12,6 +12,7 @@\n \n import gdb\n \n+import pwndbg.arch\n import pwndbg.color.memory as M\n import pwndbg.commands\n import pwndbg.config\n@@ -99,6 +100,7 @@\n # Convert to an integer if needed, and pack to bytes\n if type not in ('string', 'bytes'):\n value = pwndbg.commands.fix_int(value)\n+ value &= pwndbg.arch.ptrmask\n fmt = {\n 'little': '<',\n 'big': '>'\n", "issue": "Error in the `search` command\nThe `search` command appears to have some issues while searching signed integer data \nFor example while debugging a x86 program:\n\n```\npwndbg> search -4 0xf7eebf83\nTraceback (most recent call last):\n File \"/home/bruce30262/pwndbg/pwndbg/commands/__init__.py\", line 57, in __call__\n return self.function(*args, **kwargs)\n File \"/home/bruce30262/pwndbg/pwndbg/commands/__init__.py\", line 162, in _ArgparsedCommand\n return function(**vars(args))\n File \"/home/bruce30262/pwndbg/pwndbg/commands/__init__.py\", line 115, in _OnlyWhenRunning\n return function(*a, **kw)\n File \"/home/bruce30262/pwndbg/pwndbg/commands/search.py\", line 112, in search\n value = struct.pack(fmt, value)\nstruct.error: argument out of range\n```\n\nThis is because `pwndbg.commands.fix_int(value)` returns `-135348349` instead of an unsigned integer, and the packing format in [search.py](https://github.com/pwndbg/pwndbg/blob/b1892b27741a478cd5361061b0b4dda9ef46d02e/pwndbg/commands/search.py#L106) only support the unsigned data type, causing the error. \nCommands like `search -4 -- -100` will also cause the same error. \nKind of curious why it only support the unsigned data type though. It might need some additional check for the appropriate packing format.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport codecs\nimport os\nimport struct\n\nimport gdb\n\nimport pwndbg.color.memory as M\nimport pwndbg.commands\nimport pwndbg.config\nimport pwndbg.enhance\nimport pwndbg.search\nimport pwndbg.vmmap\n\nsaved = set()\n\ndef print_search_hit(address):\n \"\"\"Prints out a single search hit.\n\n Arguments:\n address(int): Address to print\n \"\"\"\n if not address:\n return\n\n vmmap = pwndbg.vmmap.find(address)\n if vmmap:\n region = os.path.basename(vmmap.objfile)\n else:\n region = '[mapped]'\n\n region = region.ljust(15)\n\n region = M.get(address, region)\n addr = M.get(address)\n display = pwndbg.enhance.enhance(address)\n print(region,addr,display)\n\nauto_save = pwndbg.config.Parameter('auto-save-search', False,\n 'automatically pass --save to \"search\" command')\n\nparser = argparse.ArgumentParser(description='''\nSearch memory for byte sequences, strings, pointers, and integer values\n''')\nparser.add_argument('-t', '--type', choices=['byte','short','dword','qword','pointer','string','bytes'],\n help='Size of search target', default='bytes', type=str)\nparser.add_argument('-1', '--byte', dest='type', action='store_const', const='byte',\n help='Search for a 1-byte integer')\nparser.add_argument('-2', '--word', dest='type', action='store_const', const='word',\n help='Search for a 2-byte integer')\nparser.add_argument('-4', '--dword', dest='type', action='store_const', const='dword',\n help='Search for a 4-byte integer')\nparser.add_argument('-8', 
'--qword', dest='type', action='store_const', const='qword',\n help='Search for an 8-byte integer')\nparser.add_argument('-p', '--pointer', dest='type', action='store_const', const='pointer',\n help='Search for a pointer-width integer')\nparser.add_argument('-x', '--hex', action='store_true',\n help='Target is a hex-encoded (for bytes/strings)')\nparser.add_argument('-s', '--string', action='store_true',\n help='Target is a raw string')\nparser.add_argument('-e', '--executable', action='store_true',\n help='Search executable segments only')\nparser.add_argument('-w', '--writable', action='store_true',\n help='Search writable segments only')\nparser.add_argument('value', type=str,\n help='Value to search for')\nparser.add_argument('mapping', type=str, nargs='?', default=None,\n help='Mapping to search [e.g. libc]')\nparser.add_argument('--save', action='store_true', default=None,\n help='Save results for --resume. Default comes from config %r' % auto_save.name)\nparser.add_argument('--no-save', action='store_false', default=None, dest='save',\n help='Invert --save')\nparser.add_argument('-n', '--next', action='store_true',\n help='Search only locations returned by previous search with --save')\n\[email protected](parser)\[email protected]\ndef search(type, hex, string, executable, writable, value, mapping, save, next):\n # Adjust pointer sizes to the local architecture\n if type == 'pointer':\n type = {\n 4: 'dword',\n 8: 'qword'\n }[pwndbg.arch.ptrsize]\n\n if save is None:\n save = bool(pwndbg.config.auto_save_search)\n\n if hex:\n value = codecs.decode(value, 'hex')\n\n # Convert to an integer if needed, and pack to bytes\n if type not in ('string', 'bytes'):\n value = pwndbg.commands.fix_int(value)\n fmt = {\n 'little': '<',\n 'big': '>'\n }[pwndbg.arch.endian] + {\n 'byte': 'B',\n 'short': 'H',\n 'dword': 'L',\n 'qword': 'Q'\n }[type]\n\n value = struct.pack(fmt, value)\n\n # Null-terminate strings\n elif type == 'string':\n value += b'\\x00'\n\n # Prep the saved set if necessary\n global saved\n if save:\n saved = set()\n\n # Perform the search\n for address in pwndbg.search.search(value,\n mapping=mapping,\n executable=executable,\n writable=writable):\n\n if next and address not in saved:\n continue\n\n if save:\n saved.add(address)\n\n print_search_hit(address)\n", "path": "pwndbg/commands/search.py"}]}
| 2,315 | 160 |
gh_patches_debug_24
|
rasdani/github-patches
|
git_diff
|
hpcaitech__ColossalAI-2007
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG]: ModuleNotFoundError: No module named 'colossalai.nn.optimizer.zero_optimizer'
### 🐛 Describe the bug
I install colossalAI with the command `pip install colossalai==0.1.11rc3+torch1.12cu11.3 -f https://release.colossalai.org`
But I get an error when following https://github.com/hpcaitech/ColossalAI/tree/main/examples/tutorial#-run-opt-finetuning-and-inference; I just run `bash ./run_clm_synthetic.sh` and get an error as follows:
```shell
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮
│ /home/he.yan/ColossalAI/examples/tutorial/opt/opt/run_clm.py:46 in <module> │
│ │
│ 43 from colossalai.core import global_context as gpc │
│ 44 from colossalai.logging import disable_existing_loggers, get_dist_logger │
│ 45 from colossalai.nn.optimizer import HybridAdam │
│ ❱ 46 from colossalai.nn.optimizer.zero_optimizer import ZeroOptimizer │
│ 47 from colossalai.nn.parallel import ZeroDDP │
│ 48 from colossalai.tensor import ProcessGroup │
│ 49 from colossalai.utils import get_current_device, get_dataloader │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
ModuleNotFoundError: No module named 'colossalai.nn.optimizer.zero_optimizer'
```
### Environment
Python 3.8.15
torch1.12cu11.3
</issue>
<code>
[start of colossalai/__init__.py]
1 from .initialize import (
2 get_default_parser,
3 initialize,
4 launch,
5 launch_from_openmpi,
6 launch_from_slurm,
7 launch_from_torch,
8 )
9
10 __version__ = '0.1.11rc2'
11
[end of colossalai/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/colossalai/__init__.py b/colossalai/__init__.py
--- a/colossalai/__init__.py
+++ b/colossalai/__init__.py
@@ -7,4 +7,4 @@
launch_from_torch,
)
-__version__ = '0.1.11rc2'
+__version__ = '0.1.11rc4'
|
{"golden_diff": "diff --git a/colossalai/__init__.py b/colossalai/__init__.py\n--- a/colossalai/__init__.py\n+++ b/colossalai/__init__.py\n@@ -7,4 +7,4 @@\n launch_from_torch,\n )\n \n-__version__ = '0.1.11rc2'\n+__version__ = '0.1.11rc4'\n", "issue": "[BUG]: ModuleNotFoundError: No module named 'colossalai.nn.optimizer.zero_optimizer'\n### \ud83d\udc1b Describe the bug\r\n\r\nI install colossalAI with the command `pip install colossalai==0.1.11rc3+torch1.12cu11.3 -f https://release.colossalai.org`\r\nBut I get an error when follow https://github.com/hpcaitech/ColossalAI/tree/main/examples/tutorial#-run-opt-finetuning-and-inference, I just run `bash ./run_clm_synthetic.sh` and get an error as follows:\r\n\r\n```shell\r\n\u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 Traceback (most recent call last) \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e\r\n\u2502 /home/he.yan/ColossalAI/examples/tutorial/opt/opt/run_clm.py:46 in <module> \u2502\r\n\u2502 \u2502\r\n\u2502 43 from colossalai.core import global_context as gpc \u2502\r\n\u2502 44 from colossalai.logging import disable_existing_loggers, get_dist_logger \u2502\r\n\u2502 45 from colossalai.nn.optimizer import HybridAdam \u2502\r\n\u2502 \u2771 46 from colossalai.nn.optimizer.zero_optimizer import ZeroOptimizer \u2502\r\n\u2502 47 from colossalai.nn.parallel import ZeroDDP \u2502\r\n\u2502 48 from colossalai.tensor import ProcessGroup \u2502\r\n\u2502 49 from colossalai.utils import get_current_device, get_dataloader \u2502\r\n\u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f\r\nModuleNotFoundError: No module named 'colossalai.nn.optimizer.zero_optimizer'\r\n```\r\n\r\n### Environment\r\n\r\nPython 3.8.15\r\ntorch1.12cu11.3\n", "before_files": [{"content": "from .initialize import (\n get_default_parser,\n initialize,\n launch,\n launch_from_openmpi,\n launch_from_slurm,\n launch_from_torch,\n)\n\n__version__ = '0.1.11rc2'\n", "path": "colossalai/__init__.py"}]}
| 966 | 92 |
gh_patches_debug_2151
|
rasdani/github-patches
|
git_diff
|
WeblateOrg__weblate-4665
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
migrations fail for database name containing "-"
**Describe the bug**
Applying memory.0007_use_trigram...Traceback (most recent call last):
File "/usr/lib/python3.6/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
psycopg2.errors.SyntaxError: syntax error at or near "-"
LINE 1: ALTER DATABASE weblate-staging SET pg_trgm.similarity_thresh...
^
**To Reproduce**
Set the database name to "weblate-staging"
I worked around this by changing
ALTER DATABASE {} SET
to
ALTER DATABASE \"{}\" SET
in 0007_use_trigram.py and 0008_adjust_similarity.py.
weblate-4.1.1
</issue>
<code>
[start of weblate/memory/migrations/0008_adjust_similarity.py]
1 # Generated by Django 3.0.5 on 2020-05-12 11:44
2
3 from django.db import migrations
4
5
6 def update_index(apps, schema_editor):
7 if schema_editor.connection.vendor != "postgresql":
8 return
9 # This ensures that extensions are loaded into the session. Without that
10 # the next ALTER database fails unless we're running as superuser (which
11 # is allowed to set non existing parameters, so missing extension doesn't
12 # matter)
13 # See https://www.postgresql.org/message-id/6376.1533675236%40sss.pgh.pa.us
14 schema_editor.execute("SELECT show_limit()")
15
16 schema_editor.execute(
17 "ALTER ROLE {} SET pg_trgm.similarity_threshold = 0.5".format(
18 schema_editor.connection.settings_dict["USER"]
19 )
20 )
21
22
23 class Migration(migrations.Migration):
24
25 dependencies = [
26 ("memory", "0007_use_trigram"),
27 ]
28
29 operations = [
30 migrations.RunPython(
31 update_index, migrations.RunPython.noop, elidable=False, atomic=False
32 )
33 ]
34
[end of weblate/memory/migrations/0008_adjust_similarity.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/weblate/memory/migrations/0008_adjust_similarity.py b/weblate/memory/migrations/0008_adjust_similarity.py
--- a/weblate/memory/migrations/0008_adjust_similarity.py
+++ b/weblate/memory/migrations/0008_adjust_similarity.py
@@ -15,7 +15,7 @@
schema_editor.execute(
"ALTER ROLE {} SET pg_trgm.similarity_threshold = 0.5".format(
- schema_editor.connection.settings_dict["USER"]
+ schema_editor.quote_name(schema_editor.connection.settings_dict["USER"])
)
)
|
{"golden_diff": "diff --git a/weblate/memory/migrations/0008_adjust_similarity.py b/weblate/memory/migrations/0008_adjust_similarity.py\n--- a/weblate/memory/migrations/0008_adjust_similarity.py\n+++ b/weblate/memory/migrations/0008_adjust_similarity.py\n@@ -15,7 +15,7 @@\n \n schema_editor.execute(\n \"ALTER ROLE {} SET pg_trgm.similarity_threshold = 0.5\".format(\n- schema_editor.connection.settings_dict[\"USER\"]\n+ schema_editor.quote_name(schema_editor.connection.settings_dict[\"USER\"])\n )\n )\n", "issue": "migrations fail for database name containing \"-\"\n**Describe the bug**\r\n Applying memory.0007_use_trigram...Traceback (most recent call last):\r\n File \"/usr/lib/python3.6/site-packages/django/db/backends/utils.py\", line 84, in _execute\r\n return self.cursor.execute(sql, params)\r\npsycopg2.errors.SyntaxError: syntax error at or near \"-\"\r\nLINE 1: ALTER DATABASE weblate-staging SET pg_trgm.similarity_thresh...\r\n ^\r\n**To Reproduce**\r\nSet the database name to \"weblate-staging\"\r\n\r\nI worked around this by changing of\r\nALTER DATABASE {} SET\r\nto\r\nALTER DATABASE \\\"{}\\\" SET\r\nin 0007_use_trigram.py and 0008_adjust_similarity.py.\r\n\r\nweblate-4.1.1\n", "before_files": [{"content": "# Generated by Django 3.0.5 on 2020-05-12 11:44\n\nfrom django.db import migrations\n\n\ndef update_index(apps, schema_editor):\n if schema_editor.connection.vendor != \"postgresql\":\n return\n # This ensures that extensions are loaded into the session. Without that\n # the next ALTER database fails unless we're running as superuser (which\n # is allowed to set non existing parameters, so missing extension doesn't\n # matter)\n # See https://www.postgresql.org/message-id/6376.1533675236%40sss.pgh.pa.us\n schema_editor.execute(\"SELECT show_limit()\")\n\n schema_editor.execute(\n \"ALTER ROLE {} SET pg_trgm.similarity_threshold = 0.5\".format(\n schema_editor.connection.settings_dict[\"USER\"]\n )\n )\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n (\"memory\", \"0007_use_trigram\"),\n ]\n\n operations = [\n migrations.RunPython(\n update_index, migrations.RunPython.noop, elidable=False, atomic=False\n )\n ]\n", "path": "weblate/memory/migrations/0008_adjust_similarity.py"}]}
| 1,043 | 134 |
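The golden diff above fixes the migration by passing the role name through `schema_editor.quote_name()`, which double-quotes the identifier so a hyphenated name like `weblate-staging` is no longer parsed as SQL syntax. A minimal sketch of the same idea outside a migration, assuming a PostgreSQL backend; the helper name and the standalone use of `connection.schema_editor()` are illustrative, not part of the Weblate code:

```python
from django.db import connection

def set_trgm_threshold(role_name, threshold=0.5):
    """Set pg_trgm.similarity_threshold for a role, quoting the identifier."""
    with connection.schema_editor() as schema_editor:
        # quote_name() wraps the identifier in double quotes
        # ('weblate-staging' -> '"weblate-staging"'), keeping the hyphen
        # from being read as a SQL operator.
        schema_editor.execute(
            "ALTER ROLE {} SET pg_trgm.similarity_threshold = {}".format(
                schema_editor.quote_name(role_name), float(threshold)
            )
        )
```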
gh_patches_debug_2298
|
rasdani/github-patches
|
git_diff
|
iterative__dvc-1436
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
repro: --all-pipelines doesn't do anything.
</issue>
<code>
[start of dvc/command/repro.py]
1 import os
2
3 from dvc.command.base import CmdBase
4 from dvc.command.status import CmdDataStatus
5 from dvc.exceptions import DvcException
6
7
8 class CmdRepro(CmdBase):
9 def run(self):
10 recursive = not self.args.single_item
11 saved_dir = os.path.realpath(os.curdir)
12 if self.args.cwd:
13 os.chdir(self.args.cwd)
14
15 ret = 0
16 for target in self.args.targets:
17 try:
18 stages = self.project.reproduce(
19 target,
20 recursive=recursive,
21 force=self.args.force,
22 dry=self.args.dry,
23 interactive=self.args.interactive,
24 pipeline=self.args.pipeline,
25 all_pipelines=self.args.all_pipelines,
26 ignore_build_cache=self.args.ignore_build_cache)
27
28 if len(stages) == 0:
29 self.project.logger.info(CmdDataStatus.UP_TO_DATE_MSG)
30
31 if self.args.metrics:
32 self.project.metrics_show()
33 except DvcException as ex:
34 msg = 'Failed to reproduce \'{}\''.format(target)
35 self.project.logger.error(msg, ex)
36 ret = 1
37 break
38
39 os.chdir(saved_dir)
40 return ret
41
[end of dvc/command/repro.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/dvc/command/repro.py b/dvc/command/repro.py
--- a/dvc/command/repro.py
+++ b/dvc/command/repro.py
@@ -12,6 +12,10 @@
if self.args.cwd:
os.chdir(self.args.cwd)
+ # Dirty hack so the for loop below can at least enter once
+ if self.args.all_pipelines:
+ self.args.targets = [None]
+
ret = 0
for target in self.args.targets:
try:
|
{"golden_diff": "diff --git a/dvc/command/repro.py b/dvc/command/repro.py\n--- a/dvc/command/repro.py\n+++ b/dvc/command/repro.py\n@@ -12,6 +12,10 @@\n if self.args.cwd:\n os.chdir(self.args.cwd)\n \n+ # Dirty hack so the for loop below can at least enter once\n+ if self.args.all_pipelines:\n+ self.args.targets = [None]\n+\n ret = 0\n for target in self.args.targets:\n try:\n", "issue": "repro: --all-pipelines doesn't do anything.\n\n", "before_files": [{"content": "import os\n\nfrom dvc.command.base import CmdBase\nfrom dvc.command.status import CmdDataStatus\nfrom dvc.exceptions import DvcException\n\n\nclass CmdRepro(CmdBase):\n def run(self):\n recursive = not self.args.single_item\n saved_dir = os.path.realpath(os.curdir)\n if self.args.cwd:\n os.chdir(self.args.cwd)\n\n ret = 0\n for target in self.args.targets:\n try:\n stages = self.project.reproduce(\n target,\n recursive=recursive,\n force=self.args.force,\n dry=self.args.dry,\n interactive=self.args.interactive,\n pipeline=self.args.pipeline,\n all_pipelines=self.args.all_pipelines,\n ignore_build_cache=self.args.ignore_build_cache)\n\n if len(stages) == 0:\n self.project.logger.info(CmdDataStatus.UP_TO_DATE_MSG)\n\n if self.args.metrics:\n self.project.metrics_show()\n except DvcException as ex:\n msg = 'Failed to reproduce \\'{}\\''.format(target)\n self.project.logger.error(msg, ex)\n ret = 1\n break\n\n os.chdir(saved_dir)\n return ret\n", "path": "dvc/command/repro.py"}]}
| 871 | 115 |
gh_patches_debug_6720
|
rasdani/github-patches
|
git_diff
|
lutris__lutris-2315
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
wine sandboxing error in 0.5.3
Hello,
it seems you resolved issue #2114 but now, when I launch any of my wine games, I obtain this error message:
`[Errno 21] est un dossier: '/home/legluondunet/Games/test/drive_c/users/legluondunet/Downloads'`
* "est un dossier" = "is a folder"
then another error message:
`en error prevented the game to launch`
I can reproduce this issue with all my installed wine games, **even with newly installed games**.
Workaround: disable sandboxing...
</issue>
<code>
[start of lutris/util/wine/prefix.py]
1 """Wine prefix management"""
2 import os
3 from lutris.util.wine.registry import WineRegistry
4 from lutris.util.log import logger
5 from lutris.util import joypad, system, i18n
6 from lutris.util.display import DISPLAY_MANAGER
7
8 DESKTOP_KEYS = ["Desktop", "My Music", "My Pictures", "My Videos", "Personal"]
9
10
11 class WinePrefixManager:
12 """Class to allow modification of Wine prefixes without the use of Wine"""
13
14 hkcu_prefix = "HKEY_CURRENT_USER"
15
16 def __init__(self, path):
17 if not path:
18 logger.warning("No path specified for Wine prefix")
19 self.path = path
20
21 def setup_defaults(self):
22 """Sets the defaults for newly created prefixes"""
23 self.override_dll("winemenubuilder.exe", "")
24 self.override_dll("steamwebhelper.exe", "")
25 try:
26 self.desktop_integration()
27 except OSError as ex:
28 logger.error("Failed to setup desktop integration, the prefix may not be valid.")
29 logger.exception(ex)
30
31 def get_registry_path(self, key):
32 """Matches registry keys to a registry file
33
34 Currently, only HKEY_CURRENT_USER keys are supported.
35 """
36 if key.startswith(self.hkcu_prefix):
37 return os.path.join(self.path, "user.reg")
38 raise ValueError("Unsupported key '{}'".format(key))
39
40 def get_key_path(self, key):
41 if key.startswith(self.hkcu_prefix):
42 return key[len(self.hkcu_prefix) + 1:]
43 raise ValueError(
44 "The key {} is currently not supported by WinePrefixManager".format(key)
45 )
46
47 def get_registry_key(self,key,subkey):
48 registry = WineRegistry(self.get_registry_path(key))
49 return registry.query(self.get_key_path(key),subkey)
50
51 def set_registry_key(self, key, subkey, value):
52 registry = WineRegistry(self.get_registry_path(key))
53 registry.set_value(self.get_key_path(key), subkey, value)
54 registry.save()
55
56 def clear_registry_key(self, key):
57 registry = WineRegistry(self.get_registry_path(key))
58 registry.clear_key(self.get_key_path(key))
59 registry.save()
60
61 def clear_registry_subkeys(self, key, subkeys):
62 registry = WineRegistry(self.get_registry_path(key))
63 registry.clear_subkeys(self.get_key_path(key), subkeys)
64 registry.save()
65
66 def override_dll(self, dll, mode):
67 key = self.hkcu_prefix + "/Software/Wine/DllOverrides"
68 if mode.startswith("dis"):
69 mode = ""
70 if mode not in ("builtin", "native", "builtin,native", "native,builtin", ""):
71 logger.error("DLL override '%s' mode is not valid", mode)
72 return
73 self.set_registry_key(key, dll, mode)
74
75 def desktop_integration(self, desktop_dir=None):
76 """Overwrite desktop integration"""
77 DESKTOP_FOLDERS = []
78
79 user = os.getenv("USER")
80 user_dir = os.path.join(self.path, "drive_c/users/", user)
81
82 for key in DESKTOP_KEYS:
83 folder = self.get_registry_key(self.hkcu_prefix+"/Software/Microsoft/Windows/CurrentVersion/Explorer/Shell Folders",key)
84 DESKTOP_FOLDERS.append(folder[folder.rfind("\\")+1:])
85
86 if not desktop_dir:
87 desktop_dir = user_dir
88 else:
89 desktop_dir = os.path.expanduser(desktop_dir)
90
91 if system.path_exists(user_dir):
92 # Replace desktop integration symlinks
93 for item in DESKTOP_FOLDERS:
94 path = os.path.join(user_dir, item)
95 old_path = path + ".winecfg"
96
97 if os.path.islink(path):
98 os.unlink(path)
99 elif os.path.isdir(path):
100 try:
101 os.rmdir(path)
102 # We can't delete nonempty dir, so we rename as wine do.
103 except OSError:
104 os.rename(path, old_path)
105
106 if desktop_dir != user_dir:
107 src_path = os.path.join(desktop_dir, item)
108 os.makedirs(src_path, exist_ok=True)
109 os.symlink(src_path, path)
110 else:
111 # We use first the renamed dir, otherwise we make it.
112 if os.path.isdir(old_path):
113 os.rename(old_path, path)
114 else:
115 os.makedirs(path, exist_ok=True)
116
117 # Security: Remove other symlinks.
118 for item in os.listdir(user_dir):
119 if item not in DESKTOP_FOLDERS and os.path.islink(item):
120 path = os.path.join(user_dir, item)
121 os.unlink(path)
122 os.makedirs(path)
123
124 def set_crash_dialogs(self, enabled):
125 """Enable or diable Wine crash dialogs"""
126 self.set_registry_key(
127 self.hkcu_prefix + "/Software/Wine/WineDbg",
128 "ShowCrashDialog",
129 1 if enabled else 0,
130 )
131
132 def set_virtual_desktop(self, enabled):
133 """Enable or disable wine virtual desktop.
134 The Lutris virtual desktop is refered to as 'WineDesktop', in Wine the
135 virtual desktop name is 'default'.
136 """
137 path = self.hkcu_prefix + "/Software/Wine/Explorer"
138 if enabled:
139 self.set_registry_key(path, "Desktop", "WineDesktop")
140 default_resolution = "x".join(DISPLAY_MANAGER.get_current_resolution())
141 logger.debug("Enabling wine virtual desktop with default resolution of %s",
142 default_resolution)
143 self.set_registry_key(
144 self.hkcu_prefix + "/Software/Wine/Explorer/Desktops",
145 "WineDesktop",
146 default_resolution
147 )
148 else:
149 self.clear_registry_key(path)
150
151 def set_desktop_size(self, desktop_size):
152 """Sets the desktop size if one is given but do not reset the key if
153 one isn't.
154 """
155 path = self.hkcu_prefix + "/Software/Wine/Explorer/Desktops"
156 if desktop_size:
157 self.set_registry_key(path, "WineDesktop", desktop_size)
158
159 def use_xvid_mode(self, enabled):
160 """Set this to "Y" to allow wine switch the resolution using XVidMode extension."""
161 self.set_registry_key(
162 self.hkcu_prefix + "/Software/Wine/X11 Driver",
163 "UseXVidMode",
164 "Y" if enabled else "N",
165 )
166
167 def configure_joypads(self):
168 joypads = joypad.get_joypads()
169 key = self.hkcu_prefix + "/Software/Wine/DirectInput/Joysticks"
170 self.clear_registry_key(key)
171 for device, joypad_name in joypads:
172 if "event" in device:
173 disabled_joypad = "{} (js)".format(joypad_name)
174 else:
175 disabled_joypad = "{} (event)".format(joypad_name)
176 self.set_registry_key(key, disabled_joypad, "disabled")
177
[end of lutris/util/wine/prefix.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lutris/util/wine/prefix.py b/lutris/util/wine/prefix.py
--- a/lutris/util/wine/prefix.py
+++ b/lutris/util/wine/prefix.py
@@ -116,8 +116,8 @@
# Security: Remove other symlinks.
for item in os.listdir(user_dir):
- if item not in DESKTOP_FOLDERS and os.path.islink(item):
- path = os.path.join(user_dir, item)
+ path = os.path.join(user_dir, item)
+ if item not in DESKTOP_FOLDERS and os.path.islink(path):
os.unlink(path)
os.makedirs(path)
|
{"golden_diff": "diff --git a/lutris/util/wine/prefix.py b/lutris/util/wine/prefix.py\n--- a/lutris/util/wine/prefix.py\n+++ b/lutris/util/wine/prefix.py\n@@ -116,8 +116,8 @@\n \n # Security: Remove other symlinks.\n for item in os.listdir(user_dir):\n- if item not in DESKTOP_FOLDERS and os.path.islink(item):\n- path = os.path.join(user_dir, item)\n+ path = os.path.join(user_dir, item)\n+ if item not in DESKTOP_FOLDERS and os.path.islink(path):\n os.unlink(path)\n os.makedirs(path)\n", "issue": "wine sandboxing error in 0.5.3\nHello,\r\nit seems you resolved issue #2114 but now, when I launch any of my wine games, I obtain this error message:\r\n`[Errno 21] est un dossier: '/home/legluondunet/Games/test/drive_c/users/legluondunet/Downloads'`\r\n\r\n* \"est un dossier\" = \"is a folder\"\r\n\r\nthen an other error message:\r\n`en error prevented the game to launch`\r\n\r\nI can reproduce this issue with all my installed wine game, **even with new installed game**.\r\nWorkaround: disable sandboxing...\n", "before_files": [{"content": "\"\"\"Wine prefix management\"\"\"\nimport os\nfrom lutris.util.wine.registry import WineRegistry\nfrom lutris.util.log import logger\nfrom lutris.util import joypad, system, i18n\nfrom lutris.util.display import DISPLAY_MANAGER\n\nDESKTOP_KEYS = [\"Desktop\", \"My Music\", \"My Pictures\", \"My Videos\", \"Personal\"]\n\n\nclass WinePrefixManager:\n \"\"\"Class to allow modification of Wine prefixes without the use of Wine\"\"\"\n\n hkcu_prefix = \"HKEY_CURRENT_USER\"\n\n def __init__(self, path):\n if not path:\n logger.warning(\"No path specified for Wine prefix\")\n self.path = path\n\n def setup_defaults(self):\n \"\"\"Sets the defaults for newly created prefixes\"\"\"\n self.override_dll(\"winemenubuilder.exe\", \"\")\n self.override_dll(\"steamwebhelper.exe\", \"\")\n try:\n self.desktop_integration()\n except OSError as ex:\n logger.error(\"Failed to setup desktop integration, the prefix may not be valid.\")\n logger.exception(ex)\n\n def get_registry_path(self, key):\n \"\"\"Matches registry keys to a registry file\n\n Currently, only HKEY_CURRENT_USER keys are supported.\n \"\"\"\n if key.startswith(self.hkcu_prefix):\n return os.path.join(self.path, \"user.reg\")\n raise ValueError(\"Unsupported key '{}'\".format(key))\n\n def get_key_path(self, key):\n if key.startswith(self.hkcu_prefix):\n return key[len(self.hkcu_prefix) + 1:]\n raise ValueError(\n \"The key {} is currently not supported by WinePrefixManager\".format(key)\n )\n\n def get_registry_key(self,key,subkey):\n registry = WineRegistry(self.get_registry_path(key))\n return registry.query(self.get_key_path(key),subkey)\n\n def set_registry_key(self, key, subkey, value):\n registry = WineRegistry(self.get_registry_path(key))\n registry.set_value(self.get_key_path(key), subkey, value)\n registry.save()\n\n def clear_registry_key(self, key):\n registry = WineRegistry(self.get_registry_path(key))\n registry.clear_key(self.get_key_path(key))\n registry.save()\n\n def clear_registry_subkeys(self, key, subkeys):\n registry = WineRegistry(self.get_registry_path(key))\n registry.clear_subkeys(self.get_key_path(key), subkeys)\n registry.save()\n\n def override_dll(self, dll, mode):\n key = self.hkcu_prefix + \"/Software/Wine/DllOverrides\"\n if mode.startswith(\"dis\"):\n mode = \"\"\n if mode not in (\"builtin\", \"native\", \"builtin,native\", \"native,builtin\", \"\"):\n logger.error(\"DLL override '%s' mode is not valid\", mode)\n return\n self.set_registry_key(key, dll, mode)\n\n def 
desktop_integration(self, desktop_dir=None):\n \"\"\"Overwrite desktop integration\"\"\"\n DESKTOP_FOLDERS = []\n\n user = os.getenv(\"USER\")\n user_dir = os.path.join(self.path, \"drive_c/users/\", user)\n\n for key in DESKTOP_KEYS:\n folder = self.get_registry_key(self.hkcu_prefix+\"/Software/Microsoft/Windows/CurrentVersion/Explorer/Shell Folders\",key)\n DESKTOP_FOLDERS.append(folder[folder.rfind(\"\\\\\")+1:]) \n\n if not desktop_dir:\n desktop_dir = user_dir\n else:\n desktop_dir = os.path.expanduser(desktop_dir)\n\n if system.path_exists(user_dir):\n # Replace desktop integration symlinks\n for item in DESKTOP_FOLDERS:\n path = os.path.join(user_dir, item)\n old_path = path + \".winecfg\"\n\n if os.path.islink(path):\n os.unlink(path)\n elif os.path.isdir(path):\n try:\n os.rmdir(path)\n # We can't delete nonempty dir, so we rename as wine do.\n except OSError:\n os.rename(path, old_path)\n\n if desktop_dir != user_dir:\n src_path = os.path.join(desktop_dir, item)\n os.makedirs(src_path, exist_ok=True)\n os.symlink(src_path, path)\n else:\n # We use first the renamed dir, otherwise we make it.\n if os.path.isdir(old_path):\n os.rename(old_path, path)\n else:\n os.makedirs(path, exist_ok=True)\n\n # Security: Remove other symlinks.\n for item in os.listdir(user_dir):\n if item not in DESKTOP_FOLDERS and os.path.islink(item):\n path = os.path.join(user_dir, item)\n os.unlink(path)\n os.makedirs(path)\n\n def set_crash_dialogs(self, enabled):\n \"\"\"Enable or diable Wine crash dialogs\"\"\"\n self.set_registry_key(\n self.hkcu_prefix + \"/Software/Wine/WineDbg\",\n \"ShowCrashDialog\",\n 1 if enabled else 0,\n )\n\n def set_virtual_desktop(self, enabled):\n \"\"\"Enable or disable wine virtual desktop.\n The Lutris virtual desktop is refered to as 'WineDesktop', in Wine the\n virtual desktop name is 'default'.\n \"\"\"\n path = self.hkcu_prefix + \"/Software/Wine/Explorer\"\n if enabled:\n self.set_registry_key(path, \"Desktop\", \"WineDesktop\")\n default_resolution = \"x\".join(DISPLAY_MANAGER.get_current_resolution())\n logger.debug(\"Enabling wine virtual desktop with default resolution of %s\",\n default_resolution)\n self.set_registry_key(\n self.hkcu_prefix + \"/Software/Wine/Explorer/Desktops\",\n \"WineDesktop\",\n default_resolution\n )\n else:\n self.clear_registry_key(path)\n\n def set_desktop_size(self, desktop_size):\n \"\"\"Sets the desktop size if one is given but do not reset the key if\n one isn't.\n \"\"\"\n path = self.hkcu_prefix + \"/Software/Wine/Explorer/Desktops\"\n if desktop_size:\n self.set_registry_key(path, \"WineDesktop\", desktop_size)\n\n def use_xvid_mode(self, enabled):\n \"\"\"Set this to \"Y\" to allow wine switch the resolution using XVidMode extension.\"\"\"\n self.set_registry_key(\n self.hkcu_prefix + \"/Software/Wine/X11 Driver\",\n \"UseXVidMode\",\n \"Y\" if enabled else \"N\",\n )\n\n def configure_joypads(self):\n joypads = joypad.get_joypads()\n key = self.hkcu_prefix + \"/Software/Wine/DirectInput/Joysticks\"\n self.clear_registry_key(key)\n for device, joypad_name in joypads:\n if \"event\" in device:\n disabled_joypad = \"{} (js)\".format(joypad_name)\n else:\n disabled_joypad = \"{} (event)\".format(joypad_name)\n self.set_registry_key(key, disabled_joypad, \"disabled\")\n", "path": "lutris/util/wine/prefix.py"}]}
| 2,583 | 154 |
gh_patches_debug_36027
|
rasdani/github-patches
|
git_diff
|
learningequality__kolibri-5540
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
$computedClass warning might be too sensitive
### Observed behavior
We added a warning:
> Used the $computedClass method for a class definition without any pseudo selectors ... Please use a v-bind:style directive instead.
However I think there might be some cases where it's necessary to use this even without pseudo-selectors:
* I've been having a bit of trouble with CSS specificity rules, which are different depending on whether the style is defined using `:style` vs `:class`. It has been a bit challenging to get the two to have the right precedence related to each other. (Side-note - I think source maps would have helped here)
* There might be situations where a class is necessary in non-pseudo-selector cases - for example when defining the classes in vue transitions.
### Expected behavior
we should investigate these cases and decide whether the warning is appropriate, or if there was another way of implementing
### User-facing consequences
N/A
### Errors and logs

### Context
0.12
</issue>
<code>
[start of kolibri/core/hooks.py]
1 """
2 Kolibri Core hooks
3 ------------------
4
5 WIP! Many applications are supposed to live inside the core namespace to make
6 it explicit that they are part of the core.
7
8 Do we put all their hooks in one module or should each app have its own hooks
9 module?
10
11 Anyways, for now to get hooks started, we have some defined here...
12 """
13 from __future__ import absolute_import
14 from __future__ import print_function
15 from __future__ import unicode_literals
16
17 import logging
18
19 from kolibri.plugins.hooks import KolibriHook
20 from kolibri.plugins.utils import plugin_url
21
22 logger = logging.getLogger(__name__)
23
24
25 class NavigationHook(KolibriHook):
26
27 # : A string label for the menu item
28 label = "Untitled"
29
30 # : A string or lazy proxy for the url
31 url = "/"
32
33 # Set this to True so that any time this is mixed in with a
34 # frontend asset hook, the resulting frontend code will be rendered inline.
35 inline = True
36
37 def get_menu(self):
38 menu = {}
39 for hook in self.registered_hooks:
40 menu[hook.label] = self.url
41 return menu
42
43 class Meta:
44
45 abstract = True
46
47
48 class RoleBasedRedirectHook(KolibriHook):
49 # User role to redirect for
50 role = None
51
52 # URL to redirect to
53 url = None
54
55 # Special flag to only redirect on first login
56 # Default to False
57 first_login = False
58
59 def plugin_url(self, plugin_class, url_name):
60 return plugin_url(plugin_class, url_name)
61
62 class Meta:
63
64 abstract = True
65
[end of kolibri/core/hooks.py]
[start of kolibri/core/templatetags/kolibri_tags.py]
1 """
2 Kolibri template tags
3 =====================
4 """
5 from __future__ import absolute_import
6 from __future__ import print_function
7 from __future__ import unicode_literals
8
9 import copy
10 import json
11 import logging
12 import re
13
14 from django import template
15 from django.conf import settings
16 from django.contrib.staticfiles.templatetags.staticfiles import static
17 from django.core.urlresolvers import get_resolver
18 from django.core.urlresolvers import get_script_prefix
19 from django.core.urlresolvers import resolve
20 from django.core.urlresolvers import reverse
21 from django.template.loader import render_to_string
22 from django.utils.html import mark_safe
23 from django.utils.translation import get_language
24 from django.utils.translation import get_language_bidi
25 from django.utils.translation import get_language_info
26 from django_js_reverse.core import prepare_url_list
27 from django_js_reverse.rjsmin import jsmin
28 from rest_framework.renderers import JSONRenderer
29 from six import iteritems
30
31 import kolibri
32 from kolibri.core.device.models import ContentCacheKey
33 from kolibri.core.hooks import NavigationHook
34 from kolibri.core.webpack.utils import webpack_asset_render
35 from kolibri.utils import conf
36 from kolibri.utils import i18n
37
38 register = template.Library()
39
40 logger = logging.getLogger(__name__)
41
42
43 @register.simple_tag()
44 def kolibri_content_cache_key():
45 js = """
46 <script>
47 var contentCacheKey = '{cache_key}';
48 </script>
49 """.format(
50 cache_key=ContentCacheKey.get_cache_key()
51 )
52 return mark_safe(js)
53
54
55 @register.simple_tag(takes_context=True)
56 def kolibri_language_globals(context):
57
58 template = """
59 <script>
60 var languageCode = '{lang_code}';
61 var languageDir = '{lang_dir}';
62 var languages = JSON.parse('{languages}');
63 var fullCSSFileModern = '{full_css_file_modern}?v={version}';
64 var fullCSSFileBasic = '{full_css_file_basic}?v={version}';
65 </script>
66 <link type="text/css" href="{common_css_file}?v={version}" rel="stylesheet"/>
67 <link type="text/css" href="{subset_css_file}?v={version}" rel="stylesheet"/>
68 """
69
70 language_code = get_language()
71 lang_dir = "rtl" if get_language_bidi() else "ltr"
72
73 languages = {}
74 for code, language_name in settings.LANGUAGES:
75 lang_info = next(
76 (
77 lang
78 for lang in i18n.KOLIBRI_SUPPORTED_LANGUAGES
79 if lang["intl_code"] == code
80 ),
81 None,
82 )
83 languages[code] = {
84 # Format to match the schema of the content Language model
85 "id": code,
86 "lang_name": language_name,
87 "english_name": lang_info["english_name"]
88 if lang_info
89 else get_language_info(code)["name"],
90 "lang_direction": get_language_info(code)["bidi"],
91 }
92
93 common_file = static("assets/fonts/noto-common.css")
94 subset_file = static("assets/fonts/noto-subset.{}.css".format(language_code))
95 full_file = "assets/fonts/noto-full.{}.{}.css"
96 full_file_modern = static(full_file.format(language_code, "modern"))
97 full_file_basic = static(full_file.format(language_code, "basic"))
98
99 return mark_safe(
100 template.format(
101 lang_code=language_code,
102 lang_dir=lang_dir,
103 languages=json.dumps(languages),
104 common_css_file=common_file,
105 subset_css_file=subset_file,
106 full_css_file_modern=full_file_modern,
107 full_css_file_basic=full_file_basic,
108 # Temporary cache busting strategy.
109 # Would be better to use ManifestStaticFilesStorage
110 version=kolibri.__version__,
111 )
112 )
113
114
115 @register.simple_tag()
116 def kolibri_navigation_actions():
117 """
118 A tag to include an initial JS-object to bootstrap nav action data into the app.
119 :return: An html string
120 """
121 return webpack_asset_render(NavigationHook)
122
123
124 @register.simple_tag(takes_context=True)
125 def kolibri_set_urls(context):
126 # Modified from:
127 # https://github.com/ierror/django-js-reverse/blob/master/django_js_reverse/core.py#L101
128 js_global_object_name = "window"
129 js_var_name = "kolibriUrls"
130 script_prefix = get_script_prefix()
131
132 if "request" in context:
133 default_urlresolver = get_resolver(getattr(context["request"], "urlconf", None))
134 else:
135 default_urlresolver = get_resolver(None)
136
137 js = render_to_string(
138 "django_js_reverse/urls_js.tpl",
139 {
140 "urls": sorted(list(prepare_url_list(default_urlresolver))),
141 "url_prefix": script_prefix,
142 "js_var_name": js_var_name,
143 "js_global_object_name": js_global_object_name,
144 },
145 )
146
147 js = jsmin(js)
148
149 js = (
150 """<script type="text/javascript">"""
151 + js
152 + """
153 {global_object}.staticUrl = '{static_url}';
154 </script>
155 """.format(
156 global_object=js_global_object_name, static_url=settings.STATIC_URL
157 )
158 )
159 return mark_safe(js)
160
161
162 @register.simple_tag(takes_context=True)
163 def kolibri_bootstrap_model(context, base_name, api_resource, **kwargs):
164 response, kwargs = _kolibri_bootstrap_helper(
165 context, base_name, api_resource, "detail", **kwargs
166 )
167 html = (
168 "<script type='text/javascript'>"
169 "var model = {0}.resources.{1}.createModel(JSON.parse({2}));"
170 "model.synced = true;"
171 "</script>".format(
172 conf.KOLIBRI_CORE_JS_NAME,
173 api_resource,
174 json.dumps(JSONRenderer().render(response.data).decode("utf-8")),
175 )
176 )
177 return mark_safe(html)
178
179
180 @register.simple_tag(takes_context=True)
181 def kolibri_bootstrap_collection(context, base_name, api_resource, **kwargs):
182 response, kwargs = _kolibri_bootstrap_helper(
183 context, base_name, api_resource, "list", **kwargs
184 )
185 html = (
186 "<script type='text/javascript'>"
187 "var collection = {0}.resources.{1}.createCollection({2}, JSON.parse({3}));"
188 "collection.synced = true;"
189 "</script>".format(
190 conf.KOLIBRI_CORE_JS_NAME,
191 api_resource,
192 json.dumps(kwargs),
193 json.dumps(JSONRenderer().render(response.data).decode("utf-8")),
194 )
195 )
196 return mark_safe(html)
197
198
199 def _replace_dict_values(check, replace, dict):
200 for (key, value) in iteritems(dict):
201 if dict[key] is check:
202 dict[key] = replace
203
204
205 def _kolibri_bootstrap_helper(context, base_name, api_resource, route, **kwargs):
206 reversal = dict()
207 kwargs_check = "kwargs_"
208 # remove prepended string and matching items from kwargs
209 for key in list(kwargs.keys()):
210 if kwargs_check in key:
211 item = kwargs.pop(key)
212 key = re.sub(kwargs_check, "", key)
213 reversal[key] = item
214 view, view_args, view_kwargs = resolve(
215 reverse("kolibri:core:{0}-{1}".format(base_name, route), kwargs=reversal)
216 )
217 # switch out None temporarily because invalid filtering and caching can occur
218 _replace_dict_values(None, str(""), kwargs)
219 request = copy.copy(context["request"])
220 request.GET = request.GET.copy()
221 for key in kwargs:
222 request.GET[key] = kwargs[key]
223 response = view(request, **view_kwargs)
224 _replace_dict_values(str(""), None, kwargs)
225 return response, kwargs
226
227
228 @register.simple_tag()
229 def kolibri_sentry_error_reporting():
230
231 if not conf.OPTIONS["Debug"]["SENTRY_FRONTEND_DSN"]:
232 return ""
233
234 template = """
235 <script>
236 var sentryDSN = '{dsn}';
237 var sentryEnv = '{env}';
238 </script>
239 """
240 return mark_safe(
241 template.format(
242 dsn=conf.OPTIONS["Debug"]["SENTRY_FRONTEND_DSN"],
243 env=conf.OPTIONS["Debug"]["SENTRY_ENVIRONMENT"],
244 )
245 )
246
[end of kolibri/core/templatetags/kolibri_tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kolibri/core/hooks.py b/kolibri/core/hooks.py
--- a/kolibri/core/hooks.py
+++ b/kolibri/core/hooks.py
@@ -15,7 +15,9 @@
from __future__ import unicode_literals
import logging
+import warnings
+from kolibri.plugins.hooks import abstract_method
from kolibri.plugins.hooks import KolibriHook
from kolibri.plugins.utils import plugin_url
@@ -62,3 +64,47 @@
class Meta:
abstract = True
+
+
+class MultipleThemesWarning(UserWarning):
+ pass
+
+
+class ThemeHook(KolibriHook):
+ """
+ A hook to allow custom theming of Kolibri
+ """
+
+ class Meta:
+
+ abstract = True
+
+ @property
+ @abstract_method
+ def theme(self):
+ default = {
+ # Whether to show the Kolibri log
+ # Boolean
+ "showKolibriLogo": True,
+ # URL for custom logo
+ "customLogoURL": None,
+ # URL for custom login background image
+ "splashBackgroundURL": None,
+ # Color Palette specification
+ "paletteColors": {},
+ # Brand Color specification
+ "brandColors": {},
+ # Mapping from colors to particular usage
+ "tokenMapping": {},
+ }
+ theme = {}
+ once = False
+ for hook in self.registered_hooks:
+ if once:
+ warnings.warn("Multiple themes defined by plugins, ignoring all themes")
+ return default
+ for key in default:
+ theme[key] = getattr(hook, key, theme[key])
+ once = True
+
+ return theme or default
diff --git a/kolibri/core/templatetags/kolibri_tags.py b/kolibri/core/templatetags/kolibri_tags.py
--- a/kolibri/core/templatetags/kolibri_tags.py
+++ b/kolibri/core/templatetags/kolibri_tags.py
@@ -31,6 +31,7 @@
import kolibri
from kolibri.core.device.models import ContentCacheKey
from kolibri.core.hooks import NavigationHook
+from kolibri.core.hooks import ThemeHook
from kolibri.core.webpack.utils import webpack_asset_render
from kolibri.utils import conf
from kolibri.utils import i18n
@@ -121,6 +122,20 @@
return webpack_asset_render(NavigationHook)
+@register.simple_tag()
+def kolibri_theme():
+ """
+ A tag to include a theme configuration object to add custom theming to Kolibri.
+ :return: An html string
+ """
+ template = """
+ <script>
+ var customTheme = JSON.parse('{theme}');
+ </script>
+ """
+ return mark_safe(template.format(theme=json.dumps(ThemeHook().theme)))
+
+
@register.simple_tag(takes_context=True)
def kolibri_set_urls(context):
# Modified from:
|
{"golden_diff": "diff --git a/kolibri/core/hooks.py b/kolibri/core/hooks.py\n--- a/kolibri/core/hooks.py\n+++ b/kolibri/core/hooks.py\n@@ -15,7 +15,9 @@\n from __future__ import unicode_literals\n \n import logging\n+import warnings\n \n+from kolibri.plugins.hooks import abstract_method\n from kolibri.plugins.hooks import KolibriHook\n from kolibri.plugins.utils import plugin_url\n \n@@ -62,3 +64,47 @@\n class Meta:\n \n abstract = True\n+\n+\n+class MultipleThemesWarning(UserWarning):\n+ pass\n+\n+\n+class ThemeHook(KolibriHook):\n+ \"\"\"\n+ A hook to allow custom theming of Kolibri\n+ \"\"\"\n+\n+ class Meta:\n+\n+ abstract = True\n+\n+ @property\n+ @abstract_method\n+ def theme(self):\n+ default = {\n+ # Whether to show the Kolibri log\n+ # Boolean\n+ \"showKolibriLogo\": True,\n+ # URL for custom logo\n+ \"customLogoURL\": None,\n+ # URL for custom login background image\n+ \"splashBackgroundURL\": None,\n+ # Color Palette specification\n+ \"paletteColors\": {},\n+ # Brand Color specification\n+ \"brandColors\": {},\n+ # Mapping from colors to particular usage\n+ \"tokenMapping\": {},\n+ }\n+ theme = {}\n+ once = False\n+ for hook in self.registered_hooks:\n+ if once:\n+ warnings.warn(\"Multiple themes defined by plugins, ignoring all themes\")\n+ return default\n+ for key in default:\n+ theme[key] = getattr(hook, key, theme[key])\n+ once = True\n+\n+ return theme or default\ndiff --git a/kolibri/core/templatetags/kolibri_tags.py b/kolibri/core/templatetags/kolibri_tags.py\n--- a/kolibri/core/templatetags/kolibri_tags.py\n+++ b/kolibri/core/templatetags/kolibri_tags.py\n@@ -31,6 +31,7 @@\n import kolibri\n from kolibri.core.device.models import ContentCacheKey\n from kolibri.core.hooks import NavigationHook\n+from kolibri.core.hooks import ThemeHook\n from kolibri.core.webpack.utils import webpack_asset_render\n from kolibri.utils import conf\n from kolibri.utils import i18n\n@@ -121,6 +122,20 @@\n return webpack_asset_render(NavigationHook)\n \n \[email protected]_tag()\n+def kolibri_theme():\n+ \"\"\"\n+ A tag to include a theme configuration object to add custom theming to Kolibri.\n+ :return: An html string\n+ \"\"\"\n+ template = \"\"\"\n+ <script>\n+ var customTheme = JSON.parse('{theme}');\n+ </script>\n+ \"\"\"\n+ return mark_safe(template.format(theme=json.dumps(ThemeHook().theme)))\n+\n+\n @register.simple_tag(takes_context=True)\n def kolibri_set_urls(context):\n # Modified from:\n", "issue": "$computedClass warning might be too sensitive\n### Observed behavior\r\n\r\nWe added a warning:\r\n\r\n> Used the $computedClass method for a class definition without any pseudo selectors ... Please use a v-bind:style directive instead.\r\n\r\nHowever I think there might be some cases where it's necessary to use this even without pseudo-selectors:\r\n\r\n* I've been having a bit of trouble with CSS specificity rules, which are different depending on whether the style is defined using `:style` vs `:class`. It has been a bit challenging to get the two to have the right precedence related to each other. 
(Side-note - I think source maps would have helped here)\r\n* There might be situations where a class is necessary in non-pseudo-selector cases - for example when defining the classes in vue transitions.\r\n\r\n\r\n\r\n### Expected behavior\r\n\r\nwe should investigate these cases and decide whether the warning is appropriate, or if there was another way of implementing\r\n\r\n### User-facing consequences\r\n\r\nN/A\r\n\r\n### Errors and logs\r\n\r\n\r\n\r\n\r\n### Context\r\n\r\n0.12\n", "before_files": [{"content": "\"\"\"\nKolibri Core hooks\n------------------\n\nWIP! Many applications are supposed to live inside the core namespace to make\nit explicit that they are part of the core.\n\nDo we put all their hooks in one module or should each app have its own hooks\nmodule?\n\nAnyways, for now to get hooks started, we have some defined here...\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\n\nfrom kolibri.plugins.hooks import KolibriHook\nfrom kolibri.plugins.utils import plugin_url\n\nlogger = logging.getLogger(__name__)\n\n\nclass NavigationHook(KolibriHook):\n\n # : A string label for the menu item\n label = \"Untitled\"\n\n # : A string or lazy proxy for the url\n url = \"/\"\n\n # Set this to True so that any time this is mixed in with a\n # frontend asset hook, the resulting frontend code will be rendered inline.\n inline = True\n\n def get_menu(self):\n menu = {}\n for hook in self.registered_hooks:\n menu[hook.label] = self.url\n return menu\n\n class Meta:\n\n abstract = True\n\n\nclass RoleBasedRedirectHook(KolibriHook):\n # User role to redirect for\n role = None\n\n # URL to redirect to\n url = None\n\n # Special flag to only redirect on first login\n # Default to False\n first_login = False\n\n def plugin_url(self, plugin_class, url_name):\n return plugin_url(plugin_class, url_name)\n\n class Meta:\n\n abstract = True\n", "path": "kolibri/core/hooks.py"}, {"content": "\"\"\"\nKolibri template tags\n=====================\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport copy\nimport json\nimport logging\nimport re\n\nfrom django import template\nfrom django.conf import settings\nfrom django.contrib.staticfiles.templatetags.staticfiles import static\nfrom django.core.urlresolvers import get_resolver\nfrom django.core.urlresolvers import get_script_prefix\nfrom django.core.urlresolvers import resolve\nfrom django.core.urlresolvers import reverse\nfrom django.template.loader import render_to_string\nfrom django.utils.html import mark_safe\nfrom django.utils.translation import get_language\nfrom django.utils.translation import get_language_bidi\nfrom django.utils.translation import get_language_info\nfrom django_js_reverse.core import prepare_url_list\nfrom django_js_reverse.rjsmin import jsmin\nfrom rest_framework.renderers import JSONRenderer\nfrom six import iteritems\n\nimport kolibri\nfrom kolibri.core.device.models import ContentCacheKey\nfrom kolibri.core.hooks import NavigationHook\nfrom kolibri.core.webpack.utils import webpack_asset_render\nfrom kolibri.utils import conf\nfrom kolibri.utils import i18n\n\nregister = template.Library()\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]_tag()\ndef kolibri_content_cache_key():\n js = \"\"\"\n <script>\n var contentCacheKey = '{cache_key}';\n </script>\n \"\"\".format(\n cache_key=ContentCacheKey.get_cache_key()\n )\n return 
mark_safe(js)\n\n\[email protected]_tag(takes_context=True)\ndef kolibri_language_globals(context):\n\n template = \"\"\"\n <script>\n var languageCode = '{lang_code}';\n var languageDir = '{lang_dir}';\n var languages = JSON.parse('{languages}');\n var fullCSSFileModern = '{full_css_file_modern}?v={version}';\n var fullCSSFileBasic = '{full_css_file_basic}?v={version}';\n </script>\n <link type=\"text/css\" href=\"{common_css_file}?v={version}\" rel=\"stylesheet\"/>\n <link type=\"text/css\" href=\"{subset_css_file}?v={version}\" rel=\"stylesheet\"/>\n \"\"\"\n\n language_code = get_language()\n lang_dir = \"rtl\" if get_language_bidi() else \"ltr\"\n\n languages = {}\n for code, language_name in settings.LANGUAGES:\n lang_info = next(\n (\n lang\n for lang in i18n.KOLIBRI_SUPPORTED_LANGUAGES\n if lang[\"intl_code\"] == code\n ),\n None,\n )\n languages[code] = {\n # Format to match the schema of the content Language model\n \"id\": code,\n \"lang_name\": language_name,\n \"english_name\": lang_info[\"english_name\"]\n if lang_info\n else get_language_info(code)[\"name\"],\n \"lang_direction\": get_language_info(code)[\"bidi\"],\n }\n\n common_file = static(\"assets/fonts/noto-common.css\")\n subset_file = static(\"assets/fonts/noto-subset.{}.css\".format(language_code))\n full_file = \"assets/fonts/noto-full.{}.{}.css\"\n full_file_modern = static(full_file.format(language_code, \"modern\"))\n full_file_basic = static(full_file.format(language_code, \"basic\"))\n\n return mark_safe(\n template.format(\n lang_code=language_code,\n lang_dir=lang_dir,\n languages=json.dumps(languages),\n common_css_file=common_file,\n subset_css_file=subset_file,\n full_css_file_modern=full_file_modern,\n full_css_file_basic=full_file_basic,\n # Temporary cache busting strategy.\n # Would be better to use ManifestStaticFilesStorage\n version=kolibri.__version__,\n )\n )\n\n\[email protected]_tag()\ndef kolibri_navigation_actions():\n \"\"\"\n A tag to include an initial JS-object to bootstrap nav action data into the app.\n :return: An html string\n \"\"\"\n return webpack_asset_render(NavigationHook)\n\n\[email protected]_tag(takes_context=True)\ndef kolibri_set_urls(context):\n # Modified from:\n # https://github.com/ierror/django-js-reverse/blob/master/django_js_reverse/core.py#L101\n js_global_object_name = \"window\"\n js_var_name = \"kolibriUrls\"\n script_prefix = get_script_prefix()\n\n if \"request\" in context:\n default_urlresolver = get_resolver(getattr(context[\"request\"], \"urlconf\", None))\n else:\n default_urlresolver = get_resolver(None)\n\n js = render_to_string(\n \"django_js_reverse/urls_js.tpl\",\n {\n \"urls\": sorted(list(prepare_url_list(default_urlresolver))),\n \"url_prefix\": script_prefix,\n \"js_var_name\": js_var_name,\n \"js_global_object_name\": js_global_object_name,\n },\n )\n\n js = jsmin(js)\n\n js = (\n \"\"\"<script type=\"text/javascript\">\"\"\"\n + js\n + \"\"\"\n {global_object}.staticUrl = '{static_url}';\n </script>\n \"\"\".format(\n global_object=js_global_object_name, static_url=settings.STATIC_URL\n )\n )\n return mark_safe(js)\n\n\[email protected]_tag(takes_context=True)\ndef kolibri_bootstrap_model(context, base_name, api_resource, **kwargs):\n response, kwargs = _kolibri_bootstrap_helper(\n context, base_name, api_resource, \"detail\", **kwargs\n )\n html = (\n \"<script type='text/javascript'>\"\n \"var model = {0}.resources.{1}.createModel(JSON.parse({2}));\"\n \"model.synced = true;\"\n \"</script>\".format(\n conf.KOLIBRI_CORE_JS_NAME,\n 
api_resource,\n json.dumps(JSONRenderer().render(response.data).decode(\"utf-8\")),\n )\n )\n return mark_safe(html)\n\n\[email protected]_tag(takes_context=True)\ndef kolibri_bootstrap_collection(context, base_name, api_resource, **kwargs):\n response, kwargs = _kolibri_bootstrap_helper(\n context, base_name, api_resource, \"list\", **kwargs\n )\n html = (\n \"<script type='text/javascript'>\"\n \"var collection = {0}.resources.{1}.createCollection({2}, JSON.parse({3}));\"\n \"collection.synced = true;\"\n \"</script>\".format(\n conf.KOLIBRI_CORE_JS_NAME,\n api_resource,\n json.dumps(kwargs),\n json.dumps(JSONRenderer().render(response.data).decode(\"utf-8\")),\n )\n )\n return mark_safe(html)\n\n\ndef _replace_dict_values(check, replace, dict):\n for (key, value) in iteritems(dict):\n if dict[key] is check:\n dict[key] = replace\n\n\ndef _kolibri_bootstrap_helper(context, base_name, api_resource, route, **kwargs):\n reversal = dict()\n kwargs_check = \"kwargs_\"\n # remove prepended string and matching items from kwargs\n for key in list(kwargs.keys()):\n if kwargs_check in key:\n item = kwargs.pop(key)\n key = re.sub(kwargs_check, \"\", key)\n reversal[key] = item\n view, view_args, view_kwargs = resolve(\n reverse(\"kolibri:core:{0}-{1}\".format(base_name, route), kwargs=reversal)\n )\n # switch out None temporarily because invalid filtering and caching can occur\n _replace_dict_values(None, str(\"\"), kwargs)\n request = copy.copy(context[\"request\"])\n request.GET = request.GET.copy()\n for key in kwargs:\n request.GET[key] = kwargs[key]\n response = view(request, **view_kwargs)\n _replace_dict_values(str(\"\"), None, kwargs)\n return response, kwargs\n\n\[email protected]_tag()\ndef kolibri_sentry_error_reporting():\n\n if not conf.OPTIONS[\"Debug\"][\"SENTRY_FRONTEND_DSN\"]:\n return \"\"\n\n template = \"\"\"\n <script>\n var sentryDSN = '{dsn}';\n var sentryEnv = '{env}';\n </script>\n \"\"\"\n return mark_safe(\n template.format(\n dsn=conf.OPTIONS[\"Debug\"][\"SENTRY_FRONTEND_DSN\"],\n env=conf.OPTIONS[\"Debug\"][\"SENTRY_ENVIRONMENT\"],\n )\n )\n", "path": "kolibri/core/templatetags/kolibri_tags.py"}]}
| 3,717 | 674 |
gh_patches_debug_3798
|
rasdani/github-patches
|
git_diff
|
Parsl__parsl-618
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error building docs with new env
```
Running Sphinx v1.8.1
loading pickled environment... failed: build environment version not current
Theme error:
sphinx_rtd_theme is no longer a hard dependency since version 1.4.0. Please install it manually.(pip install sphinx_rtd_theme)
```
</issue>
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2
3 with open('parsl/version.py') as f:
4 exec(f.read())
5
6 with open('requirements.txt') as f:
7 install_requires = f.readlines()
8
9 setup(
10 name='parsl',
11 version=VERSION,
12 description='Simple data dependent workflows in Python',
13 long_description='Simple parallel workflows system for Python',
14 url='https://github.com/Parsl/parsl',
15 author='The Parsl Team',
16 author_email='[email protected]',
17 license='Apache 2.0',
18 download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),
19 package_data={'': ['LICENSE']},
20 packages=find_packages(),
21 install_requires=install_requires,
22 scripts = ['parsl/executors/high_throughput/process_worker_pool.py',
23 'parsl/executors/extreme_scale/mpi_worker_pool.py'],
24 extras_require = {
25 'db_logging' : ['CMRESHandler', 'psutil', 'sqlalchemy'],
26 'aws' : ['boto3'],
27 'jetstream' : ['python-novaclient'],
28 'extreme_scale' : ['mpi4py'],
29 'docs' : ['nbsphinx'],
30 'google_cloud' : ['google-auth', 'google-api-python-client']
31 },
32 classifiers = [
33 # Maturity
34 'Development Status :: 3 - Alpha',
35 # Intended audience
36 'Intended Audience :: Developers',
37 # Licence, must match with licence above
38 'License :: OSI Approved :: Apache Software License',
39 # Python versions supported
40 'Programming Language :: Python :: 3.5',
41 'Programming Language :: Python :: 3.6',
42 ],
43 keywords=['Workflows', 'Scientific computing'],
44 )
45
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -26,7 +26,7 @@
'aws' : ['boto3'],
'jetstream' : ['python-novaclient'],
'extreme_scale' : ['mpi4py'],
- 'docs' : ['nbsphinx'],
+ 'docs' : ['nbsphinx', 'sphinx_rtd_theme'],
'google_cloud' : ['google-auth', 'google-api-python-client']
},
classifiers = [
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -26,7 +26,7 @@\n 'aws' : ['boto3'],\n 'jetstream' : ['python-novaclient'],\n 'extreme_scale' : ['mpi4py'],\n- 'docs' : ['nbsphinx'],\n+ 'docs' : ['nbsphinx', 'sphinx_rtd_theme'],\n 'google_cloud' : ['google-auth', 'google-api-python-client']\n },\n classifiers = [\n", "issue": "Error building docs with new env\n\r\n```\r\nRunning Sphinx v1.8.1\r\nloading pickled environment... failed: build environment version not current\r\n\r\nTheme error:\r\nsphinx_rtd_theme is no longer a hard dependency since version 1.4.0. Please install it manually.(pip install sphinx_rtd_theme)\r\n```\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\nwith open('parsl/version.py') as f:\n exec(f.read())\n\nwith open('requirements.txt') as f:\n install_requires = f.readlines()\n\nsetup(\n name='parsl',\n version=VERSION,\n description='Simple data dependent workflows in Python',\n long_description='Simple parallel workflows system for Python',\n url='https://github.com/Parsl/parsl',\n author='The Parsl Team',\n author_email='[email protected]',\n license='Apache 2.0',\n download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),\n package_data={'': ['LICENSE']},\n packages=find_packages(),\n install_requires=install_requires,\n scripts = ['parsl/executors/high_throughput/process_worker_pool.py',\n 'parsl/executors/extreme_scale/mpi_worker_pool.py'],\n extras_require = {\n 'db_logging' : ['CMRESHandler', 'psutil', 'sqlalchemy'],\n 'aws' : ['boto3'],\n 'jetstream' : ['python-novaclient'],\n 'extreme_scale' : ['mpi4py'],\n 'docs' : ['nbsphinx'],\n 'google_cloud' : ['google-auth', 'google-api-python-client']\n },\n classifiers = [\n # Maturity\n 'Development Status :: 3 - Alpha',\n # Intended audience\n 'Intended Audience :: Developers',\n # Licence, must match with licence above\n 'License :: OSI Approved :: Apache Software License',\n # Python versions supported\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords=['Workflows', 'Scientific computing'],\n)\n", "path": "setup.py"}]}
| 1,059 | 117 |
gh_patches_debug_31952
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-3174
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Re-add rudamentary version of organisers to the event list API endpoint
<!--
Please add the appropriate label for what change should be made:
docs: changes to the documentation)
refactor: refactoring production code, eg. renaming a variable or rewriting a function
test: adding missing tests, refactoring tests; no production code change
chore: updating poetry etc; no production code change
-->
### Describe the change
Add an id-name based list of event organisers to the event list API endpoint.
### Motivation
Because it would make me and my Reaxit PR very happy.
### Current implementation
It got removed :(
### Suggested implementation
Just do it Kappa
</issue>
<code>
[start of website/activemembers/api/v2/serializers/member_group.py]
1 from rest_framework import serializers
2
3 from activemembers.api.v2.serializers.member_group_membership import (
4 MemberGroupMembershipSerializer,
5 )
6 from activemembers.models import MemberGroup
7 from thaliawebsite.api.v2.serializers import ThumbnailSerializer
8 from thaliawebsite.api.v2.serializers.cleaned_model_serializer import (
9 CleanedModelSerializer,
10 )
11
12
13 class MemberGroupSerializer(CleanedModelSerializer):
14 """API serializer for member groups."""
15
16 def __init__(self, *args, **kwargs):
17 super().__init__(*args, **kwargs)
18
19 if "get_memberships" not in self.context and "members" in self.fields:
20 self.fields.pop("members")
21
22 class Meta:
23 """Meta class for the serializer."""
24
25 model = MemberGroup
26 fields = (
27 "pk",
28 "name",
29 "type",
30 "description",
31 "since",
32 "until",
33 "contact_address",
34 "photo",
35 "members",
36 )
37
38 members = serializers.SerializerMethodField("_members")
39 type = serializers.SerializerMethodField("_type")
40 photo = ThumbnailSerializer(placeholder="activemembers/images/placeholder.png")
41
42 def _members(self, instance):
43 memberships = self.context["get_memberships"](instance).prefetch_related(
44 "member__membergroupmembership_set"
45 )
46 return MemberGroupMembershipSerializer(
47 many=True, context=self.context
48 ).to_representation(memberships)
49
50 def _type(self, instance):
51 if hasattr(instance, "board"):
52 return "board"
53 if hasattr(instance, "committee"):
54 return "committee"
55 if hasattr(instance, "society"):
56 return "society"
57 return None
58
59
60 class MemberGroupListSerializer(MemberGroupSerializer):
61 class Meta:
62 """Meta class for the serializer."""
63
64 model = MemberGroup
65 fields = (
66 "pk",
67 "name",
68 "type",
69 "description",
70 "since",
71 "until",
72 "contact_address",
73 "photo",
74 )
75
[end of website/activemembers/api/v2/serializers/member_group.py]
[start of website/events/api/v2/serializers/event.py]
1 from rest_framework import serializers
2 from rest_framework.reverse import reverse
3
4 from activemembers.api.v2.serializers.member_group import MemberGroupSerializer
5 from documents.api.v2.serializers.document import DocumentSerializer
6 from events import services
7 from events.api.v2.serializers.event_registration import EventRegistrationSerializer
8 from events.models import Event
9 from payments.api.v2.serializers.payment_amount import PaymentAmountSerializer
10 from thaliawebsite.api.v2.serializers import CleanedHTMLSerializer
11 from thaliawebsite.api.v2.serializers.cleaned_model_serializer import (
12 CleanedModelSerializer,
13 )
14 from utils.snippets import create_google_maps_url
15
16
17 class EventSerializer(CleanedModelSerializer):
18 """Serializer for events."""
19
20 class Meta:
21 model = Event
22 fields = (
23 "pk",
24 "slug",
25 "url",
26 "title",
27 "description",
28 "caption",
29 "start",
30 "end",
31 "category",
32 "registration_start",
33 "registration_end",
34 "cancel_deadline",
35 "optional_registrations",
36 "location",
37 "price",
38 "fine",
39 "num_participants",
40 "max_participants",
41 "no_registration_message",
42 "registration_status",
43 "cancel_too_late_message",
44 "has_fields",
45 "food_event",
46 "maps_url",
47 "user_permissions",
48 "user_registration",
49 "organisers",
50 "documents",
51 )
52
53 description = CleanedHTMLSerializer()
54 organisers = MemberGroupSerializer(many=True)
55 user_registration = serializers.SerializerMethodField("_user_registration")
56 num_participants = serializers.SerializerMethodField("_num_participants")
57 maps_url = serializers.SerializerMethodField("_maps_url")
58 registration_status = serializers.SerializerMethodField("_registration_status")
59 price = PaymentAmountSerializer()
60 fine = PaymentAmountSerializer()
61 documents = DocumentSerializer(many=True)
62 user_permissions = serializers.SerializerMethodField("_user_permissions")
63 url = serializers.SerializerMethodField("_url")
64
65 def _user_registration(self, instance: Event):
66 if self.context["request"].member and len(instance.member_registration) > 0:
67 registration = instance.member_registration[-1]
68 return EventRegistrationSerializer(
69 registration,
70 context=self.context,
71 fields=(
72 "pk",
73 "present",
74 "queue_position",
75 "is_cancelled",
76 "is_late_cancellation",
77 "date",
78 "payment",
79 ),
80 ).data
81 return None
82
83 def _registration_status(self, instance: Event):
84 if self.context["request"].member and len(instance.member_registration) > 0:
85 registration = instance.member_registration[-1]
86 else:
87 registration = None
88 status = services.registration_status(
89 instance, registration, self.context["request"].member
90 )
91 cancel_status = services.cancel_status(instance, registration)
92
93 status_str = services.registration_status_string(status, instance, registration)
94 cancel_str = services.cancel_info_string(instance, cancel_status, status)
95 if services.show_cancel_status(status) and cancel_str != "":
96 return f"{status_str} {cancel_str}"
97 return f"{status_str}"
98
99 def _num_participants(self, instance: Event):
100 if instance.max_participants:
101 return min(instance.participant_count, instance.max_participants)
102 return instance.participant_count
103
104 def _user_permissions(self, instance):
105 member = self.context["request"].member
106 return services.event_permissions(member, instance, registration_prefetch=True)
107
108 def _url(self, instance: Event):
109 if instance.slug is None:
110 return reverse(
111 "events:event",
112 kwargs={"pk": instance.pk},
113 request=self.context["request"],
114 )
115 return reverse(
116 "events:event",
117 kwargs={"slug": instance.slug},
118 request=self.context["request"],
119 )
120
121 def _maps_url(self, instance):
122 return create_google_maps_url(instance.map_location, zoom=13, size="450x250")
123
124
125 class EventListSerializer(EventSerializer):
126 class Meta:
127 model = Event
128 fields = (
129 "pk",
130 "slug",
131 "url",
132 "title",
133 "description",
134 "caption",
135 "start",
136 "end",
137 "category",
138 "registration_start",
139 "registration_end",
140 "cancel_deadline",
141 "optional_registrations",
142 "location",
143 "price",
144 "fine",
145 "num_participants",
146 "max_participants",
147 "no_registration_message",
148 "registration_status",
149 "cancel_too_late_message",
150 "has_fields",
151 "food_event",
152 "maps_url",
153 "user_permissions",
154 "user_registration",
155 "documents",
156 )
157
[end of website/events/api/v2/serializers/event.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/website/activemembers/api/v2/serializers/member_group.py b/website/activemembers/api/v2/serializers/member_group.py
--- a/website/activemembers/api/v2/serializers/member_group.py
+++ b/website/activemembers/api/v2/serializers/member_group.py
@@ -72,3 +72,16 @@
"contact_address",
"photo",
)
+
+
+class MemberGroupShortSerializer(MemberGroupListSerializer):
+ class Meta:
+ model = MemberGroup
+ fields = (
+ "pk",
+ "name",
+ "type",
+ "since",
+ "until",
+ "contact_address",
+ )
diff --git a/website/events/api/v2/serializers/event.py b/website/events/api/v2/serializers/event.py
--- a/website/events/api/v2/serializers/event.py
+++ b/website/events/api/v2/serializers/event.py
@@ -1,7 +1,10 @@
from rest_framework import serializers
from rest_framework.reverse import reverse
-from activemembers.api.v2.serializers.member_group import MemberGroupSerializer
+from activemembers.api.v2.serializers.member_group import (
+ MemberGroupSerializer,
+ MemberGroupShortSerializer,
+)
from documents.api.v2.serializers.document import DocumentSerializer
from events import services
from events.api.v2.serializers.event_registration import EventRegistrationSerializer
@@ -123,34 +126,4 @@
class EventListSerializer(EventSerializer):
- class Meta:
- model = Event
- fields = (
- "pk",
- "slug",
- "url",
- "title",
- "description",
- "caption",
- "start",
- "end",
- "category",
- "registration_start",
- "registration_end",
- "cancel_deadline",
- "optional_registrations",
- "location",
- "price",
- "fine",
- "num_participants",
- "max_participants",
- "no_registration_message",
- "registration_status",
- "cancel_too_late_message",
- "has_fields",
- "food_event",
- "maps_url",
- "user_permissions",
- "user_registration",
- "documents",
- )
+ organisers = MemberGroupShortSerializer(many=True)
|
{"golden_diff": "diff --git a/website/activemembers/api/v2/serializers/member_group.py b/website/activemembers/api/v2/serializers/member_group.py\n--- a/website/activemembers/api/v2/serializers/member_group.py\n+++ b/website/activemembers/api/v2/serializers/member_group.py\n@@ -72,3 +72,16 @@\n \"contact_address\",\n \"photo\",\n )\n+\n+\n+class MemberGroupShortSerializer(MemberGroupListSerializer):\n+ class Meta:\n+ model = MemberGroup\n+ fields = (\n+ \"pk\",\n+ \"name\",\n+ \"type\",\n+ \"since\",\n+ \"until\",\n+ \"contact_address\",\n+ )\ndiff --git a/website/events/api/v2/serializers/event.py b/website/events/api/v2/serializers/event.py\n--- a/website/events/api/v2/serializers/event.py\n+++ b/website/events/api/v2/serializers/event.py\n@@ -1,7 +1,10 @@\n from rest_framework import serializers\n from rest_framework.reverse import reverse\n \n-from activemembers.api.v2.serializers.member_group import MemberGroupSerializer\n+from activemembers.api.v2.serializers.member_group import (\n+ MemberGroupSerializer,\n+ MemberGroupShortSerializer,\n+)\n from documents.api.v2.serializers.document import DocumentSerializer\n from events import services\n from events.api.v2.serializers.event_registration import EventRegistrationSerializer\n@@ -123,34 +126,4 @@\n \n \n class EventListSerializer(EventSerializer):\n- class Meta:\n- model = Event\n- fields = (\n- \"pk\",\n- \"slug\",\n- \"url\",\n- \"title\",\n- \"description\",\n- \"caption\",\n- \"start\",\n- \"end\",\n- \"category\",\n- \"registration_start\",\n- \"registration_end\",\n- \"cancel_deadline\",\n- \"optional_registrations\",\n- \"location\",\n- \"price\",\n- \"fine\",\n- \"num_participants\",\n- \"max_participants\",\n- \"no_registration_message\",\n- \"registration_status\",\n- \"cancel_too_late_message\",\n- \"has_fields\",\n- \"food_event\",\n- \"maps_url\",\n- \"user_permissions\",\n- \"user_registration\",\n- \"documents\",\n- )\n+ organisers = MemberGroupShortSerializer(many=True)\n", "issue": "Re-add rudamentary version of organisers to the event list API endpoint\n<!--\r\n\r\nPlease add the appropriate label for what change should be made:\r\ndocs: changes to the documentation)\r\nrefactor: refactoring production code, eg. 
renaming a variable or rewriting a function\r\ntest: adding missing tests, refactoring tests; no production code change\r\nchore: updating poetry etc; no production code change\r\n\r\n-->\r\n\r\n### Describe the change\r\nAdd a id-name based list of event organisers to the event list API endpoint.\r\n\r\n### Motivation\r\nBecause it would make me and my Reaxit PR very happy.\r\n\r\n### Current implementation\r\nIt got removed :(\r\n\r\n### Suggested implementation\r\nJust do it Kappa\r\n\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom activemembers.api.v2.serializers.member_group_membership import (\n MemberGroupMembershipSerializer,\n)\nfrom activemembers.models import MemberGroup\nfrom thaliawebsite.api.v2.serializers import ThumbnailSerializer\nfrom thaliawebsite.api.v2.serializers.cleaned_model_serializer import (\n CleanedModelSerializer,\n)\n\n\nclass MemberGroupSerializer(CleanedModelSerializer):\n \"\"\"API serializer for member groups.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n if \"get_memberships\" not in self.context and \"members\" in self.fields:\n self.fields.pop(\"members\")\n\n class Meta:\n \"\"\"Meta class for the serializer.\"\"\"\n\n model = MemberGroup\n fields = (\n \"pk\",\n \"name\",\n \"type\",\n \"description\",\n \"since\",\n \"until\",\n \"contact_address\",\n \"photo\",\n \"members\",\n )\n\n members = serializers.SerializerMethodField(\"_members\")\n type = serializers.SerializerMethodField(\"_type\")\n photo = ThumbnailSerializer(placeholder=\"activemembers/images/placeholder.png\")\n\n def _members(self, instance):\n memberships = self.context[\"get_memberships\"](instance).prefetch_related(\n \"member__membergroupmembership_set\"\n )\n return MemberGroupMembershipSerializer(\n many=True, context=self.context\n ).to_representation(memberships)\n\n def _type(self, instance):\n if hasattr(instance, \"board\"):\n return \"board\"\n if hasattr(instance, \"committee\"):\n return \"committee\"\n if hasattr(instance, \"society\"):\n return \"society\"\n return None\n\n\nclass MemberGroupListSerializer(MemberGroupSerializer):\n class Meta:\n \"\"\"Meta class for the serializer.\"\"\"\n\n model = MemberGroup\n fields = (\n \"pk\",\n \"name\",\n \"type\",\n \"description\",\n \"since\",\n \"until\",\n \"contact_address\",\n \"photo\",\n )\n", "path": "website/activemembers/api/v2/serializers/member_group.py"}, {"content": "from rest_framework import serializers\nfrom rest_framework.reverse import reverse\n\nfrom activemembers.api.v2.serializers.member_group import MemberGroupSerializer\nfrom documents.api.v2.serializers.document import DocumentSerializer\nfrom events import services\nfrom events.api.v2.serializers.event_registration import EventRegistrationSerializer\nfrom events.models import Event\nfrom payments.api.v2.serializers.payment_amount import PaymentAmountSerializer\nfrom thaliawebsite.api.v2.serializers import CleanedHTMLSerializer\nfrom thaliawebsite.api.v2.serializers.cleaned_model_serializer import (\n CleanedModelSerializer,\n)\nfrom utils.snippets import create_google_maps_url\n\n\nclass EventSerializer(CleanedModelSerializer):\n \"\"\"Serializer for events.\"\"\"\n\n class Meta:\n model = Event\n fields = (\n \"pk\",\n \"slug\",\n \"url\",\n \"title\",\n \"description\",\n \"caption\",\n \"start\",\n \"end\",\n \"category\",\n \"registration_start\",\n \"registration_end\",\n \"cancel_deadline\",\n \"optional_registrations\",\n \"location\",\n \"price\",\n \"fine\",\n 
\"num_participants\",\n \"max_participants\",\n \"no_registration_message\",\n \"registration_status\",\n \"cancel_too_late_message\",\n \"has_fields\",\n \"food_event\",\n \"maps_url\",\n \"user_permissions\",\n \"user_registration\",\n \"organisers\",\n \"documents\",\n )\n\n description = CleanedHTMLSerializer()\n organisers = MemberGroupSerializer(many=True)\n user_registration = serializers.SerializerMethodField(\"_user_registration\")\n num_participants = serializers.SerializerMethodField(\"_num_participants\")\n maps_url = serializers.SerializerMethodField(\"_maps_url\")\n registration_status = serializers.SerializerMethodField(\"_registration_status\")\n price = PaymentAmountSerializer()\n fine = PaymentAmountSerializer()\n documents = DocumentSerializer(many=True)\n user_permissions = serializers.SerializerMethodField(\"_user_permissions\")\n url = serializers.SerializerMethodField(\"_url\")\n\n def _user_registration(self, instance: Event):\n if self.context[\"request\"].member and len(instance.member_registration) > 0:\n registration = instance.member_registration[-1]\n return EventRegistrationSerializer(\n registration,\n context=self.context,\n fields=(\n \"pk\",\n \"present\",\n \"queue_position\",\n \"is_cancelled\",\n \"is_late_cancellation\",\n \"date\",\n \"payment\",\n ),\n ).data\n return None\n\n def _registration_status(self, instance: Event):\n if self.context[\"request\"].member and len(instance.member_registration) > 0:\n registration = instance.member_registration[-1]\n else:\n registration = None\n status = services.registration_status(\n instance, registration, self.context[\"request\"].member\n )\n cancel_status = services.cancel_status(instance, registration)\n\n status_str = services.registration_status_string(status, instance, registration)\n cancel_str = services.cancel_info_string(instance, cancel_status, status)\n if services.show_cancel_status(status) and cancel_str != \"\":\n return f\"{status_str} {cancel_str}\"\n return f\"{status_str}\"\n\n def _num_participants(self, instance: Event):\n if instance.max_participants:\n return min(instance.participant_count, instance.max_participants)\n return instance.participant_count\n\n def _user_permissions(self, instance):\n member = self.context[\"request\"].member\n return services.event_permissions(member, instance, registration_prefetch=True)\n\n def _url(self, instance: Event):\n if instance.slug is None:\n return reverse(\n \"events:event\",\n kwargs={\"pk\": instance.pk},\n request=self.context[\"request\"],\n )\n return reverse(\n \"events:event\",\n kwargs={\"slug\": instance.slug},\n request=self.context[\"request\"],\n )\n\n def _maps_url(self, instance):\n return create_google_maps_url(instance.map_location, zoom=13, size=\"450x250\")\n\n\nclass EventListSerializer(EventSerializer):\n class Meta:\n model = Event\n fields = (\n \"pk\",\n \"slug\",\n \"url\",\n \"title\",\n \"description\",\n \"caption\",\n \"start\",\n \"end\",\n \"category\",\n \"registration_start\",\n \"registration_end\",\n \"cancel_deadline\",\n \"optional_registrations\",\n \"location\",\n \"price\",\n \"fine\",\n \"num_participants\",\n \"max_participants\",\n \"no_registration_message\",\n \"registration_status\",\n \"cancel_too_late_message\",\n \"has_fields\",\n \"food_event\",\n \"maps_url\",\n \"user_permissions\",\n \"user_registration\",\n \"documents\",\n )\n", "path": "website/events/api/v2/serializers/event.py"}]}
| 2,642 | 529 |
gh_patches_debug_28357
|
rasdani/github-patches
|
git_diff
|
pyca__cryptography-4200
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Some OpenSSL bignum arithmetic operations are not in constant time
### What's wrong:
1. Some arithmetic operations exposed in the [OpenSSL bignum binding](https://github.com/pyca/cryptography/blob/master/src/_cffi_src/openssl/bignum.py) are not in constant time.
2. These functions are specifically:
    1. `BN_div`
    2. `BN_mod_inverse`
    3. `BN_mod_exp`
### How to resolve:
1. The OpenSSL solution to this is to call `BN_set_flags` on the secret BIGNUM and set the `BN_FLG_CONSTTIME` flag. [The OpenSSL docs reference this here.](https://github.com/openssl/openssl/blob/master/include/openssl/bn.h#L61)
2. Expose the needed operations:
    1. `BN_MONT_CTX` operations.
    2. `BN_mod_exp_mont`, and `BN_mod_exp_mont_consttime`
    3. `BN_set_flags`, `BN_get_flags`, and the `BN_FLG_CONSTTIME` flag.
### Notes:
1. Cryptography.io isn't affected by these timing issues. I have clarified with @alex and @reaperhulk.
2. If you are a downstream user utilizing these operations on private/secret values, then _you are affected_.
</issue>
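As an illustration of the requested change (editor's sketch, not part of the original report): once the binding exposes `BN_set_flags` and `BN_FLG_CONSTTIME` as listed above, a downstream user could mark a secret `BIGNUM` as constant-time through pyca/cryptography's low-level `Binding` object. The flag and function are assumed to exist only after such a patch.

```python
from cryptography.hazmat.bindings.openssl.binding import Binding

binding = Binding()
lib = binding.lib

bn = lib.BN_new()
# Assumes BN_set_flags / BN_FLG_CONSTTIME are exposed by the binding (they are
# not before the change requested above). The flag tells OpenSSL to prefer
# constant-time code paths when this BIGNUM holds secret material.
lib.BN_set_flags(bn, lib.BN_FLG_CONSTTIME)
lib.BN_clear_free(bn)
```

With `BN_mod_exp_mont_consttime` also exposed, the modular exponentiation itself could then be routed through the constant-time Montgomery path.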
<code>
[start of src/_cffi_src/openssl/bignum.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 INCLUDES = """
8 #include <openssl/bn.h>
9 """
10
11 TYPES = """
12 typedef ... BN_CTX;
13 typedef ... BIGNUM;
14 typedef int... BN_ULONG;
15 """
16
17 FUNCTIONS = """
18 BIGNUM *BN_new(void);
19 void BN_free(BIGNUM *);
20 void BN_clear_free(BIGNUM *);
21
22 int BN_rand(BIGNUM *, int, int, int);
23 int BN_rand_range(BIGNUM *, BIGNUM *);
24
25 BN_CTX *BN_CTX_new(void);
26 void BN_CTX_free(BN_CTX *);
27
28 void BN_CTX_start(BN_CTX *);
29 BIGNUM *BN_CTX_get(BN_CTX *);
30 void BN_CTX_end(BN_CTX *);
31
32 BIGNUM *BN_copy(BIGNUM *, const BIGNUM *);
33 BIGNUM *BN_dup(const BIGNUM *);
34
35 int BN_set_word(BIGNUM *, BN_ULONG);
36 BN_ULONG BN_get_word(const BIGNUM *);
37
38 const BIGNUM *BN_value_one(void);
39
40 char *BN_bn2hex(const BIGNUM *);
41 int BN_hex2bn(BIGNUM **, const char *);
42 int BN_dec2bn(BIGNUM **, const char *);
43
44 int BN_bn2bin(const BIGNUM *, unsigned char *);
45 BIGNUM *BN_bin2bn(const unsigned char *, int, BIGNUM *);
46
47 int BN_num_bits(const BIGNUM *);
48
49 int BN_cmp(const BIGNUM *, const BIGNUM *);
50 int BN_add(BIGNUM *, const BIGNUM *, const BIGNUM *);
51 int BN_sub(BIGNUM *, const BIGNUM *, const BIGNUM *);
52 int BN_mul(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);
53 int BN_sqr(BIGNUM *, const BIGNUM *, BN_CTX *);
54 int BN_div(BIGNUM *, BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);
55 int BN_nnmod(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);
56 int BN_mod_add(BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *,
57 BN_CTX *);
58 int BN_mod_sub(BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *,
59 BN_CTX *);
60 int BN_mod_mul(BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *,
61 BN_CTX *);
62 int BN_mod_sqr(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);
63 int BN_exp(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);
64 int BN_mod_exp(BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *,
65 BN_CTX *);
66 int BN_gcd(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);
67 BIGNUM *BN_mod_inverse(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);
68
69 int BN_set_bit(BIGNUM *, int);
70 int BN_clear_bit(BIGNUM *, int);
71
72 int BN_is_bit_set(const BIGNUM *, int);
73
74 int BN_mask_bits(BIGNUM *, int);
75
76 int BN_num_bytes(const BIGNUM *);
77
78 int BN_zero(BIGNUM *);
79 int BN_one(BIGNUM *);
80 int BN_mod(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);
81
82 int BN_lshift(BIGNUM *, const BIGNUM *, int);
83 int BN_lshift1(BIGNUM *, BIGNUM *);
84
85 int BN_rshift(BIGNUM *, BIGNUM *, int);
86 int BN_rshift1(BIGNUM *, BIGNUM *);
87 """
88
89 CUSTOMIZATIONS = """
90 """
91
[end of src/_cffi_src/openssl/bignum.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/_cffi_src/openssl/bignum.py b/src/_cffi_src/openssl/bignum.py
--- a/src/_cffi_src/openssl/bignum.py
+++ b/src/_cffi_src/openssl/bignum.py
@@ -10,11 +10,17 @@
TYPES = """
typedef ... BN_CTX;
+typedef ... BN_MONT_CTX;
typedef ... BIGNUM;
typedef int... BN_ULONG;
"""
FUNCTIONS = """
+#define BN_FLG_CONSTTIME ...
+
+void BN_set_flags(BIGNUM *, int);
+int BN_get_flags(const BIGNUM *, int);
+
BIGNUM *BN_new(void);
void BN_free(BIGNUM *);
void BN_clear_free(BIGNUM *);
@@ -29,6 +35,10 @@
BIGNUM *BN_CTX_get(BN_CTX *);
void BN_CTX_end(BN_CTX *);
+BN_MONT_CTX *BN_MONT_CTX_new(void);
+int BN_MONT_CTX_set(BN_MONT_CTX *, BIGNUM *, BN_CTX *);
+void BN_MONT_CTX_free(BN_MONT_CTX *);
+
BIGNUM *BN_copy(BIGNUM *, const BIGNUM *);
BIGNUM *BN_dup(const BIGNUM *);
@@ -63,6 +73,10 @@
int BN_exp(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);
int BN_mod_exp(BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *,
BN_CTX *);
+int BN_mod_exp_mont(BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *,
+ BN_CTX *, BN_MONT_CTX *);
+int BN_mod_exp_mont_consttime(BIGNUM *, const BIGNUM *, const BIGNUM *,
+ const BIGNUM *, BN_CTX *, BN_MONT_CTX *);
int BN_gcd(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);
BIGNUM *BN_mod_inverse(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);
|
{"golden_diff": "diff --git a/src/_cffi_src/openssl/bignum.py b/src/_cffi_src/openssl/bignum.py\n--- a/src/_cffi_src/openssl/bignum.py\n+++ b/src/_cffi_src/openssl/bignum.py\n@@ -10,11 +10,17 @@\n \n TYPES = \"\"\"\n typedef ... BN_CTX;\n+typedef ... BN_MONT_CTX;\n typedef ... BIGNUM;\n typedef int... BN_ULONG;\n \"\"\"\n \n FUNCTIONS = \"\"\"\n+#define BN_FLG_CONSTTIME ...\n+\n+void BN_set_flags(BIGNUM *, int);\n+int BN_get_flags(const BIGNUM *, int);\n+\n BIGNUM *BN_new(void);\n void BN_free(BIGNUM *);\n void BN_clear_free(BIGNUM *);\n@@ -29,6 +35,10 @@\n BIGNUM *BN_CTX_get(BN_CTX *);\n void BN_CTX_end(BN_CTX *);\n \n+BN_MONT_CTX *BN_MONT_CTX_new(void);\n+int BN_MONT_CTX_set(BN_MONT_CTX *, BIGNUM *, BN_CTX *);\n+void BN_MONT_CTX_free(BN_MONT_CTX *);\n+\n BIGNUM *BN_copy(BIGNUM *, const BIGNUM *);\n BIGNUM *BN_dup(const BIGNUM *);\n \n@@ -63,6 +73,10 @@\n int BN_exp(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);\n int BN_mod_exp(BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *,\n BN_CTX *);\n+int BN_mod_exp_mont(BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *,\n+ BN_CTX *, BN_MONT_CTX *);\n+int BN_mod_exp_mont_consttime(BIGNUM *, const BIGNUM *, const BIGNUM *,\n+ const BIGNUM *, BN_CTX *, BN_MONT_CTX *);\n int BN_gcd(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);\n BIGNUM *BN_mod_inverse(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);\n", "issue": "Some OpenSSL bignum arithmetic operations are not in constant time\n### What's wrong:\r\n1. Some arithmetic operations exposed in the [OpenSSL bignum binding](https://github.com/pyca/cryptography/blob/master/src/_cffi_src/openssl/bignum.py) are not in constant time.\r\n2. These functions are specifically:\r\n 1. `BN_div`\r\n 2. `BN_mod_inverse`\r\n 3. `BN_mod_exp`\r\n\r\n### How to resolve:\r\n1. The OpenSSL solution to this is to call `BN_set_flags` on the secret BIGNUM and set the `BN_FLG_CONSTTIME` flag. [The OpenSSL docs reference this here.](https://github.com/openssl/openssl/blob/master/include/openssl/bn.h#L61)\r\n2. Expose the needed operations:\r\n 1. `BN_MONT_CTX` operations.\r\n 2. `BN_mod_exp_mont`, and `BN_mod_exp_mont_consttime` \r\n 3. `BN_set_flags`, `BN_get_flags`, and the `BN_FLG_CONSTTIME` flag.\r\n\r\n### Notes:\r\n1. Cryptography.io isn't affected by these timing issues. I have clarified with @alex and @reaperhulk.\r\n2. If you are a downstream user utilizing these operations on private/secret values, then _you are affected_.\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nINCLUDES = \"\"\"\n#include <openssl/bn.h>\n\"\"\"\n\nTYPES = \"\"\"\ntypedef ... BN_CTX;\ntypedef ... BIGNUM;\ntypedef int... 
BN_ULONG;\n\"\"\"\n\nFUNCTIONS = \"\"\"\nBIGNUM *BN_new(void);\nvoid BN_free(BIGNUM *);\nvoid BN_clear_free(BIGNUM *);\n\nint BN_rand(BIGNUM *, int, int, int);\nint BN_rand_range(BIGNUM *, BIGNUM *);\n\nBN_CTX *BN_CTX_new(void);\nvoid BN_CTX_free(BN_CTX *);\n\nvoid BN_CTX_start(BN_CTX *);\nBIGNUM *BN_CTX_get(BN_CTX *);\nvoid BN_CTX_end(BN_CTX *);\n\nBIGNUM *BN_copy(BIGNUM *, const BIGNUM *);\nBIGNUM *BN_dup(const BIGNUM *);\n\nint BN_set_word(BIGNUM *, BN_ULONG);\nBN_ULONG BN_get_word(const BIGNUM *);\n\nconst BIGNUM *BN_value_one(void);\n\nchar *BN_bn2hex(const BIGNUM *);\nint BN_hex2bn(BIGNUM **, const char *);\nint BN_dec2bn(BIGNUM **, const char *);\n\nint BN_bn2bin(const BIGNUM *, unsigned char *);\nBIGNUM *BN_bin2bn(const unsigned char *, int, BIGNUM *);\n\nint BN_num_bits(const BIGNUM *);\n\nint BN_cmp(const BIGNUM *, const BIGNUM *);\nint BN_add(BIGNUM *, const BIGNUM *, const BIGNUM *);\nint BN_sub(BIGNUM *, const BIGNUM *, const BIGNUM *);\nint BN_mul(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);\nint BN_sqr(BIGNUM *, const BIGNUM *, BN_CTX *);\nint BN_div(BIGNUM *, BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);\nint BN_nnmod(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);\nint BN_mod_add(BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *,\n BN_CTX *);\nint BN_mod_sub(BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *,\n BN_CTX *);\nint BN_mod_mul(BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *,\n BN_CTX *);\nint BN_mod_sqr(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);\nint BN_exp(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);\nint BN_mod_exp(BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *,\n BN_CTX *);\nint BN_gcd(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);\nBIGNUM *BN_mod_inverse(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);\n\nint BN_set_bit(BIGNUM *, int);\nint BN_clear_bit(BIGNUM *, int);\n\nint BN_is_bit_set(const BIGNUM *, int);\n\nint BN_mask_bits(BIGNUM *, int);\n\nint BN_num_bytes(const BIGNUM *);\n\nint BN_zero(BIGNUM *);\nint BN_one(BIGNUM *);\nint BN_mod(BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *);\n\nint BN_lshift(BIGNUM *, const BIGNUM *, int);\nint BN_lshift1(BIGNUM *, BIGNUM *);\n\nint BN_rshift(BIGNUM *, BIGNUM *, int);\nint BN_rshift1(BIGNUM *, BIGNUM *);\n\"\"\"\n\nCUSTOMIZATIONS = \"\"\"\n\"\"\"\n", "path": "src/_cffi_src/openssl/bignum.py"}]}
| 1,812 | 443 |
gh_patches_debug_14895
|
rasdani/github-patches
|
git_diff
|
arviz-devs__arviz-619
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
densityplot warning tight_layout
`plot_density` returns the following warning:
_UserWarning: This figure was using constrained_layout==True, but that is incompatible with subplots_adjust and or tight_layout: setting constrained_layout==False.
warnings.warn("This figure was using constrained_layout==True, ")_
The solution is to not use tight_layout()
</issue>
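For context (editor's sketch, independent of the ArviZ code below): the warning comes from matplotlib itself whenever a figure created with `constrained_layout=True` later has `tight_layout()` called on it, so it can be reproduced without ArviZ. The exact message differs between matplotlib versions.

```python
import warnings

import matplotlib
matplotlib.use("Agg")  # headless backend so this runs without a display
import matplotlib.pyplot as plt

fig, ax = plt.subplots(constrained_layout=True)
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    fig.tight_layout()  # incompatible with constrained_layout -> UserWarning

print([str(w.message) for w in caught])
```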
<code>
[start of arviz/plots/densityplot.py]
1 """KDE and histogram plots for multiple variables."""
2 import numpy as np
3
4 from ..data import convert_to_dataset
5 from ..stats import hpd
6 from .kdeplot import _fast_kde
7 from .plot_utils import (
8 _scale_fig_size,
9 make_label,
10 xarray_var_iter,
11 default_grid,
12 _create_axes_grid,
13 )
14 from ..utils import _var_names
15
16
17 # pylint:disable-msg=too-many-function-args
18 def plot_density(
19 data,
20 group="posterior",
21 data_labels=None,
22 var_names=None,
23 credible_interval=0.94,
24 point_estimate="mean",
25 colors="cycle",
26 outline=True,
27 hpd_markers="",
28 shade=0.0,
29 bw=4.5,
30 figsize=None,
31 textsize=None,
32 ):
33 """Generate KDE plots for continuous variables and histograms for discrete ones.
34
35 Plots are truncated at their 100*(1-alpha)% credible intervals. Plots are grouped per variable
36 and colors assigned to models.
37
38 Parameters
39 ----------
40 data : Union[Object, Iterator[Object]]
41 Any object that can be converted to an az.InferenceData object, or an Iterator returning
42 a sequence of such objects.
43 Refer to documentation of az.convert_to_dataset for details about such objects.
44 group: Optional[str]
45 Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
46 Alternative values include 'prior' and any other strings used as dataset keys in the
47 InferenceData.
48 data_labels : Optional[List[str]]
49 List with names for the datasets passed as "data." Useful when plotting more than one
50 dataset. Must be the same shape as the data parameter. Defaults to None.
51 var_names: Optional[List[str]]
52 List of variables to plot. If multiple datasets are supplied and var_names is not None,
53 will print the same set of variables for each dataset. Defaults to None, which results in
54 all the variables being plotted.
55 credible_interval : float
56 Credible intervals. Should be in the interval (0, 1]. Defaults to 0.94.
57 point_estimate : Optional[str]
58 Plot point estimate per variable. Values should be 'mean', 'median' or None.
59 Defaults to 'mean'.
60 colors : Optional[Union[List[str],str]]
61 List with valid matplotlib colors, one color per model. Alternative a string can be passed.
62 If the string is `cycle`, it will automatically choose a color per model from matplolib's
63 cycle. If a single color is passed, e.g. 'k', 'C2' or 'red' this color will be used for all
64 models. Defaults to `cycle`.
65 outline : bool
66 Use a line to draw KDEs and histograms. Default to True
67 hpd_markers : str
68 A valid `matplotlib.markers` like 'v', used to indicate the limits of the hpd interval.
69 Defaults to empty string (no marker).
70 shade : Optional[float]
71 Alpha blending value for the shaded area under the curve, between 0 (no shade) and 1
72 (opaque). Defaults to 0.
73 bw : Optional[float]
74 Bandwidth scaling factor for the KDE. Should be larger than 0. The higher this number the
75 smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule
76 of thumb (the default rule used by SciPy).
77 figsize : Optional[Tuple[int, int]]
78 Figure size. If None it will be defined automatically.
79 textsize: Optional[float]
80 Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
81 on figsize.
82
83 Returns
84 -------
85 ax : Matplotlib axes
86
87
88 Examples
89 --------
90 Plot default density plot
91
92 .. plot::
93 :context: close-figs
94
95 >>> import arviz as az
96 >>> centered = az.load_arviz_data('centered_eight')
97 >>> non_centered = az.load_arviz_data('non_centered_eight')
98 >>> az.plot_density([centered, non_centered])
99
100 Plot subset variables by specifying variable name exactly
101
102 .. plot::
103 :context: close-figs
104
105 >>> az.plot_density([centered, non_centered], var_names=["mu"])
106
107 Plot a specific `az.InferenceData` group
108
109 .. plot::
110 :context: close-figs
111
112 >>> az.plot_density([centered, non_centered], var_names=["mu"], group="prior")
113
114 Specify credible interval
115
116 .. plot::
117 :context: close-figs
118
119 >>> az.plot_density([centered, non_centered], var_names=["mu"], credible_interval=.5)
120
121 Shade plots and/or remove outlines
122
123 .. plot::
124 :context: close-figs
125
126 >>> az.plot_density([centered, non_centered], var_names=["mu"], outline=False, shade=.8)
127
128 Specify binwidth for kernel density estimation
129
130 .. plot::
131 :context: close-figs
132
133 >>> az.plot_density([centered, non_centered], var_names=["mu"], bw=.9)
134 """
135 if not isinstance(data, (list, tuple)):
136 datasets = [convert_to_dataset(data, group=group)]
137 else:
138 datasets = [convert_to_dataset(datum, group=group) for datum in data]
139
140 var_names = _var_names(var_names, datasets)
141
142 if point_estimate not in ("mean", "median", None):
143 raise ValueError(
144 "Point estimate should be 'mean'," "median' or None, not {}".format(point_estimate)
145 )
146
147 n_data = len(datasets)
148
149 if data_labels is None:
150 if n_data > 1:
151 data_labels = ["{}".format(idx) for idx in range(n_data)]
152 else:
153 data_labels = [""]
154 elif len(data_labels) != n_data:
155 raise ValueError(
156 "The number of names for the models ({}) "
157 "does not match the number of models ({})".format(len(data_labels), n_data)
158 )
159
160 if colors == "cycle":
161 colors = ["C{}".format(idx % 10) for idx in range(n_data)]
162 elif isinstance(colors, str):
163 colors = [colors for _ in range(n_data)]
164
165 if not 1 >= credible_interval > 0:
166 raise ValueError("The value of credible_interval should be in the interval (0, 1]")
167
168 to_plot = [list(xarray_var_iter(data, var_names, combined=True)) for data in datasets]
169 all_labels = []
170 length_plotters = []
171 for plotters in to_plot:
172 length_plotters.append(len(plotters))
173 for var_name, selection, _ in plotters:
174 label = make_label(var_name, selection)
175 if label not in all_labels:
176 all_labels.append(label)
177 length_plotters = max(length_plotters)
178 rows, cols = default_grid(length_plotters, max_cols=3)
179
180 (figsize, _, titlesize, xt_labelsize, linewidth, markersize) = _scale_fig_size(
181 figsize, textsize, rows, cols
182 )
183
184 fig, ax = _create_axes_grid(length_plotters, rows, cols, figsize=figsize, squeeze=False)
185
186 axis_map = {label: ax_ for label, ax_ in zip(all_labels, ax.flatten())}
187 for m_idx, plotters in enumerate(to_plot):
188 for var_name, selection, values in plotters:
189 label = make_label(var_name, selection)
190 _d_helper(
191 values.flatten(),
192 label,
193 colors[m_idx],
194 bw,
195 titlesize,
196 xt_labelsize,
197 linewidth,
198 markersize,
199 credible_interval,
200 point_estimate,
201 hpd_markers,
202 outline,
203 shade,
204 axis_map[label],
205 )
206
207 if n_data > 1:
208 for m_idx, label in enumerate(data_labels):
209 ax[0].plot([], label=label, c=colors[m_idx], markersize=markersize)
210 ax[0].legend(fontsize=xt_labelsize)
211
212 fig.tight_layout()
213
214 return ax
215
216
217 def _d_helper(
218 vec,
219 vname,
220 color,
221 bw,
222 titlesize,
223 xt_labelsize,
224 linewidth,
225 markersize,
226 credible_interval,
227 point_estimate,
228 hpd_markers,
229 outline,
230 shade,
231 ax,
232 ):
233 """Plot an individual dimension.
234
235 Parameters
236 ----------
237 vec : array
238 1D array from trace
239 vname : str
240 variable name
241 color : str
242 matplotlib color
243 bw : float
244 Bandwidth scaling factor. Should be larger than 0. The higher this number the smoother the
245 KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule of thumb
246 (the default used rule by SciPy).
247 titlesize : float
248 font size for title
249 xt_labelsize : float
250 fontsize for xticks
251 linewidth : float
252 Thickness of lines
253 markersize : float
254 Size of markers
255 credible_interval : float
256 Credible intervals. Defaults to 0.94
257 point_estimate : str or None
258 'mean' or 'median'
259 shade : float
260 Alpha blending value for the shaded area under the curve, between 0 (no shade) and 1
261 (opaque). Defaults to 0.
262 ax : matplotlib axes
263 """
264 if vec.dtype.kind == "f":
265 if credible_interval != 1:
266 hpd_ = hpd(vec, credible_interval)
267 new_vec = vec[(vec >= hpd_[0]) & (vec <= hpd_[1])]
268 else:
269 new_vec = vec
270
271 density, xmin, xmax = _fast_kde(new_vec, bw=bw)
272 density *= credible_interval
273 x = np.linspace(xmin, xmax, len(density))
274 ymin = density[0]
275 ymax = density[-1]
276
277 if outline:
278 ax.plot(x, density, color=color, lw=linewidth)
279 ax.plot([xmin, xmin], [-ymin / 100, ymin], color=color, ls="-", lw=linewidth)
280 ax.plot([xmax, xmax], [-ymax / 100, ymax], color=color, ls="-", lw=linewidth)
281
282 if shade:
283 ax.fill_between(x, density, color=color, alpha=shade)
284
285 else:
286 xmin, xmax = hpd(vec, credible_interval)
287 bins = range(xmin, xmax + 2)
288 if outline:
289 ax.hist(vec, bins=bins, color=color, histtype="step", align="left")
290 if shade:
291 ax.hist(vec, bins=bins, color=color, alpha=shade)
292
293 if hpd_markers:
294 ax.plot(xmin, 0, hpd_markers, color=color, markeredgecolor="k", markersize=markersize)
295 ax.plot(xmax, 0, hpd_markers, color=color, markeredgecolor="k", markersize=markersize)
296
297 if point_estimate is not None:
298 if point_estimate == "mean":
299 est = np.mean(vec)
300 elif point_estimate == "median":
301 est = np.median(vec)
302 ax.plot(est, 0, "o", color=color, markeredgecolor="k", markersize=markersize)
303
304 ax.set_yticks([])
305 ax.set_title(vname, fontsize=titlesize, wrap=True)
306 for pos in ["left", "right", "top"]:
307 ax.spines[pos].set_visible(False)
308 ax.tick_params(labelsize=xt_labelsize)
309
[end of arviz/plots/densityplot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/arviz/plots/densityplot.py b/arviz/plots/densityplot.py
--- a/arviz/plots/densityplot.py
+++ b/arviz/plots/densityplot.py
@@ -181,7 +181,7 @@
figsize, textsize, rows, cols
)
- fig, ax = _create_axes_grid(length_plotters, rows, cols, figsize=figsize, squeeze=False)
+ _, ax = _create_axes_grid(length_plotters, rows, cols, figsize=figsize, squeeze=False)
axis_map = {label: ax_ for label, ax_ in zip(all_labels, ax.flatten())}
for m_idx, plotters in enumerate(to_plot):
@@ -209,8 +209,6 @@
ax[0].plot([], label=label, c=colors[m_idx], markersize=markersize)
ax[0].legend(fontsize=xt_labelsize)
- fig.tight_layout()
-
return ax
|
{"golden_diff": "diff --git a/arviz/plots/densityplot.py b/arviz/plots/densityplot.py\n--- a/arviz/plots/densityplot.py\n+++ b/arviz/plots/densityplot.py\n@@ -181,7 +181,7 @@\n figsize, textsize, rows, cols\n )\n \n- fig, ax = _create_axes_grid(length_plotters, rows, cols, figsize=figsize, squeeze=False)\n+ _, ax = _create_axes_grid(length_plotters, rows, cols, figsize=figsize, squeeze=False)\n \n axis_map = {label: ax_ for label, ax_ in zip(all_labels, ax.flatten())}\n for m_idx, plotters in enumerate(to_plot):\n@@ -209,8 +209,6 @@\n ax[0].plot([], label=label, c=colors[m_idx], markersize=markersize)\n ax[0].legend(fontsize=xt_labelsize)\n \n- fig.tight_layout()\n-\n return ax\n", "issue": "densityplot warning tight_layout\nplot density returns the following warning:\r\n\r\n_UserWarning: This figure was using constrained_layout==True, but that is incompatible with subplots_adjust and or tight_layout: setting constrained_layout==False. \r\n warnings.warn(\"This figure was using constrained_layout==True, \")_\r\n\r\nThe solution is to do not use tight_layout()\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"KDE and histogram plots for multiple variables.\"\"\"\nimport numpy as np\n\nfrom ..data import convert_to_dataset\nfrom ..stats import hpd\nfrom .kdeplot import _fast_kde\nfrom .plot_utils import (\n _scale_fig_size,\n make_label,\n xarray_var_iter,\n default_grid,\n _create_axes_grid,\n)\nfrom ..utils import _var_names\n\n\n# pylint:disable-msg=too-many-function-args\ndef plot_density(\n data,\n group=\"posterior\",\n data_labels=None,\n var_names=None,\n credible_interval=0.94,\n point_estimate=\"mean\",\n colors=\"cycle\",\n outline=True,\n hpd_markers=\"\",\n shade=0.0,\n bw=4.5,\n figsize=None,\n textsize=None,\n):\n \"\"\"Generate KDE plots for continuous variables and histograms for discrete ones.\n\n Plots are truncated at their 100*(1-alpha)% credible intervals. Plots are grouped per variable\n and colors assigned to models.\n\n Parameters\n ----------\n data : Union[Object, Iterator[Object]]\n Any object that can be converted to an az.InferenceData object, or an Iterator returning\n a sequence of such objects.\n Refer to documentation of az.convert_to_dataset for details about such objects.\n group: Optional[str]\n Specifies which InferenceData group should be plotted. Defaults to 'posterior'.\n Alternative values include 'prior' and any other strings used as dataset keys in the\n InferenceData.\n data_labels : Optional[List[str]]\n List with names for the datasets passed as \"data.\" Useful when plotting more than one\n dataset. Must be the same shape as the data parameter. Defaults to None.\n var_names: Optional[List[str]]\n List of variables to plot. If multiple datasets are supplied and var_names is not None,\n will print the same set of variables for each dataset. Defaults to None, which results in\n all the variables being plotted.\n credible_interval : float\n Credible intervals. Should be in the interval (0, 1]. Defaults to 0.94.\n point_estimate : Optional[str]\n Plot point estimate per variable. Values should be 'mean', 'median' or None.\n Defaults to 'mean'.\n colors : Optional[Union[List[str],str]]\n List with valid matplotlib colors, one color per model. Alternative a string can be passed.\n If the string is `cycle`, it will automatically choose a color per model from matplolib's\n cycle. If a single color is passed, e.g. 'k', 'C2' or 'red' this color will be used for all\n models. Defaults to `cycle`.\n outline : bool\n Use a line to draw KDEs and histograms. 
Default to True\n hpd_markers : str\n A valid `matplotlib.markers` like 'v', used to indicate the limits of the hpd interval.\n Defaults to empty string (no marker).\n shade : Optional[float]\n Alpha blending value for the shaded area under the curve, between 0 (no shade) and 1\n (opaque). Defaults to 0.\n bw : Optional[float]\n Bandwidth scaling factor for the KDE. Should be larger than 0. The higher this number the\n smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule\n of thumb (the default rule used by SciPy).\n figsize : Optional[Tuple[int, int]]\n Figure size. If None it will be defined automatically.\n textsize: Optional[float]\n Text size scaling factor for labels, titles and lines. If None it will be autoscaled based\n on figsize.\n\n Returns\n -------\n ax : Matplotlib axes\n\n\n Examples\n --------\n Plot default density plot\n\n .. plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> centered = az.load_arviz_data('centered_eight')\n >>> non_centered = az.load_arviz_data('non_centered_eight')\n >>> az.plot_density([centered, non_centered])\n\n Plot subset variables by specifying variable name exactly\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_density([centered, non_centered], var_names=[\"mu\"])\n\n Plot a specific `az.InferenceData` group\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_density([centered, non_centered], var_names=[\"mu\"], group=\"prior\")\n\n Specify credible interval\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_density([centered, non_centered], var_names=[\"mu\"], credible_interval=.5)\n\n Shade plots and/or remove outlines\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_density([centered, non_centered], var_names=[\"mu\"], outline=False, shade=.8)\n\n Specify binwidth for kernel density estimation\n\n .. 
plot::\n :context: close-figs\n\n >>> az.plot_density([centered, non_centered], var_names=[\"mu\"], bw=.9)\n \"\"\"\n if not isinstance(data, (list, tuple)):\n datasets = [convert_to_dataset(data, group=group)]\n else:\n datasets = [convert_to_dataset(datum, group=group) for datum in data]\n\n var_names = _var_names(var_names, datasets)\n\n if point_estimate not in (\"mean\", \"median\", None):\n raise ValueError(\n \"Point estimate should be 'mean',\" \"median' or None, not {}\".format(point_estimate)\n )\n\n n_data = len(datasets)\n\n if data_labels is None:\n if n_data > 1:\n data_labels = [\"{}\".format(idx) for idx in range(n_data)]\n else:\n data_labels = [\"\"]\n elif len(data_labels) != n_data:\n raise ValueError(\n \"The number of names for the models ({}) \"\n \"does not match the number of models ({})\".format(len(data_labels), n_data)\n )\n\n if colors == \"cycle\":\n colors = [\"C{}\".format(idx % 10) for idx in range(n_data)]\n elif isinstance(colors, str):\n colors = [colors for _ in range(n_data)]\n\n if not 1 >= credible_interval > 0:\n raise ValueError(\"The value of credible_interval should be in the interval (0, 1]\")\n\n to_plot = [list(xarray_var_iter(data, var_names, combined=True)) for data in datasets]\n all_labels = []\n length_plotters = []\n for plotters in to_plot:\n length_plotters.append(len(plotters))\n for var_name, selection, _ in plotters:\n label = make_label(var_name, selection)\n if label not in all_labels:\n all_labels.append(label)\n length_plotters = max(length_plotters)\n rows, cols = default_grid(length_plotters, max_cols=3)\n\n (figsize, _, titlesize, xt_labelsize, linewidth, markersize) = _scale_fig_size(\n figsize, textsize, rows, cols\n )\n\n fig, ax = _create_axes_grid(length_plotters, rows, cols, figsize=figsize, squeeze=False)\n\n axis_map = {label: ax_ for label, ax_ in zip(all_labels, ax.flatten())}\n for m_idx, plotters in enumerate(to_plot):\n for var_name, selection, values in plotters:\n label = make_label(var_name, selection)\n _d_helper(\n values.flatten(),\n label,\n colors[m_idx],\n bw,\n titlesize,\n xt_labelsize,\n linewidth,\n markersize,\n credible_interval,\n point_estimate,\n hpd_markers,\n outline,\n shade,\n axis_map[label],\n )\n\n if n_data > 1:\n for m_idx, label in enumerate(data_labels):\n ax[0].plot([], label=label, c=colors[m_idx], markersize=markersize)\n ax[0].legend(fontsize=xt_labelsize)\n\n fig.tight_layout()\n\n return ax\n\n\ndef _d_helper(\n vec,\n vname,\n color,\n bw,\n titlesize,\n xt_labelsize,\n linewidth,\n markersize,\n credible_interval,\n point_estimate,\n hpd_markers,\n outline,\n shade,\n ax,\n):\n \"\"\"Plot an individual dimension.\n\n Parameters\n ----------\n vec : array\n 1D array from trace\n vname : str\n variable name\n color : str\n matplotlib color\n bw : float\n Bandwidth scaling factor. Should be larger than 0. The higher this number the smoother the\n KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule of thumb\n (the default used rule by SciPy).\n titlesize : float\n font size for title\n xt_labelsize : float\n fontsize for xticks\n linewidth : float\n Thickness of lines\n markersize : float\n Size of markers\n credible_interval : float\n Credible intervals. Defaults to 0.94\n point_estimate : str or None\n 'mean' or 'median'\n shade : float\n Alpha blending value for the shaded area under the curve, between 0 (no shade) and 1\n (opaque). 
Defaults to 0.\n ax : matplotlib axes\n \"\"\"\n if vec.dtype.kind == \"f\":\n if credible_interval != 1:\n hpd_ = hpd(vec, credible_interval)\n new_vec = vec[(vec >= hpd_[0]) & (vec <= hpd_[1])]\n else:\n new_vec = vec\n\n density, xmin, xmax = _fast_kde(new_vec, bw=bw)\n density *= credible_interval\n x = np.linspace(xmin, xmax, len(density))\n ymin = density[0]\n ymax = density[-1]\n\n if outline:\n ax.plot(x, density, color=color, lw=linewidth)\n ax.plot([xmin, xmin], [-ymin / 100, ymin], color=color, ls=\"-\", lw=linewidth)\n ax.plot([xmax, xmax], [-ymax / 100, ymax], color=color, ls=\"-\", lw=linewidth)\n\n if shade:\n ax.fill_between(x, density, color=color, alpha=shade)\n\n else:\n xmin, xmax = hpd(vec, credible_interval)\n bins = range(xmin, xmax + 2)\n if outline:\n ax.hist(vec, bins=bins, color=color, histtype=\"step\", align=\"left\")\n if shade:\n ax.hist(vec, bins=bins, color=color, alpha=shade)\n\n if hpd_markers:\n ax.plot(xmin, 0, hpd_markers, color=color, markeredgecolor=\"k\", markersize=markersize)\n ax.plot(xmax, 0, hpd_markers, color=color, markeredgecolor=\"k\", markersize=markersize)\n\n if point_estimate is not None:\n if point_estimate == \"mean\":\n est = np.mean(vec)\n elif point_estimate == \"median\":\n est = np.median(vec)\n ax.plot(est, 0, \"o\", color=color, markeredgecolor=\"k\", markersize=markersize)\n\n ax.set_yticks([])\n ax.set_title(vname, fontsize=titlesize, wrap=True)\n for pos in [\"left\", \"right\", \"top\"]:\n ax.spines[pos].set_visible(False)\n ax.tick_params(labelsize=xt_labelsize)\n", "path": "arviz/plots/densityplot.py"}]}
| 3,972 | 222 |
gh_patches_debug_19759
|
rasdani/github-patches
|
git_diff
|
sql-machine-learning__elasticdl-761
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Provide default gpu resource name and validation
Currently users can only pass "gpu" as part of the resource name. However, k8s requires it to be either `"nvidia.com/gpu"` or `"amd.com/gpu"` (if the AMD plugin is enabled). There are other vendors as well, but a pattern to use for validation would be `"<vendor>.com/gpu"`.
We should consider adding `"nvidia.com/gpu"` as the default and validating user-provided gpu resource names against the pattern `"<vendor>.com/gpu"`.
Reference: https://kubernetes.io/docs/tasks/manage-gpus/scheduling-gpus/
</issue>
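A rough sketch of the requested behaviour (editor's illustration; the helper name and regex below are invented, not elasticdl code): default a bare "gpu" to `"nvidia.com/gpu"` and accept vendor-specific names matching `"<vendor>.com/gpu"`.

```python
import re

# Hypothetical helper, not the project's implementation.
_GPU_NAME_PATTERN = re.compile(r"^[a-z0-9]([a-z0-9-]*[a-z0-9])?\.com/gpu$")


def normalize_gpu_resource_name(name):
    """Return a canonical gpu resource name or raise ValueError."""
    if name == "gpu":
        return "nvidia.com/gpu"
    if _GPU_NAME_PATTERN.match(name):
        return name
    raise ValueError("invalid gpu resource name: %s" % name)


print(normalize_gpu_resource_name("gpu"))          # nvidia.com/gpu
print(normalize_gpu_resource_name("amd.com/gpu"))  # amd.com/gpu
```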
<code>
[start of elasticdl/python/elasticdl/common/k8s_utils.py]
1 import re
2
3
4 _ALLOWED_RESOURCE_TYPES = ["memory", "disk", "ephemeral-storage", "cpu", "gpu"]
5
6
7 def _is_numeric(n):
8 try:
9 float(n)
10 except ValueError:
11 return False
12 return True
13
14
15 def _valid_gpu_spec(gpu_str):
16 if not gpu_str.isnumeric():
17 raise ValueError("invalid gpu request spec: " + gpu_str)
18 return gpu_str
19
20
21 def _valid_cpu_spec(cpu_str):
22 regexp = re.compile("([1-9]{1})([0-9]*)m$")
23 if not regexp.match(cpu_str) and not _is_numeric(cpu_str):
24 raise ValueError("invalid cpu request spec: " + cpu_str)
25 return cpu_str
26
27
28 def _valid_mem_spec(mem_str):
29 regexp = re.compile("([1-9]{1})([0-9]*)(E|P|T|G|M|K|Ei|Pi|Ti|Gi|Mi|Ki)$")
30 if not regexp.match(mem_str):
31 raise ValueError("invalid memory request spec: " + mem_str)
32 return mem_str
33
34
35 def parse_resource(resource_str):
36 """Parse combined k8s resource string into a dict.
37
38 Args:
39 resource_str: The string representation for k8s resource,
40 e.g. "cpu=250m,memory=32Mi,disk=64Mi,gpu=1,ephemeral-storage=32Mi".
41
42 Return:
43 A Python dictionary parsed from the given resource string.
44 """
45 kvs = resource_str.split(",")
46 resource_names = []
47 parsed_res_dict = {}
48 for kv in kvs:
49 k, v = kv.split("=")
50 if k not in resource_names:
51 resource_names.append(k)
52 else:
53 raise ValueError(
54 "The resource string contains duplicate resource names: %s" % k
55 )
56 if k in ["memory", "disk", "ephemeral-storage"]:
57 _valid_mem_spec(v)
58 elif k == "cpu":
59 _valid_cpu_spec(v)
60 elif k == "gpu":
61 _valid_gpu_spec(v)
62 else:
63 raise ValueError(
64 "%s is not in the allowed list of resource types: %s"
65 % (k, _ALLOWED_RESOURCE_TYPES)
66 )
67 parsed_res_dict[k] = v
68 return parsed_res_dict
69
[end of elasticdl/python/elasticdl/common/k8s_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/elasticdl/python/elasticdl/common/k8s_utils.py b/elasticdl/python/elasticdl/common/k8s_utils.py
--- a/elasticdl/python/elasticdl/common/k8s_utils.py
+++ b/elasticdl/python/elasticdl/common/k8s_utils.py
@@ -2,6 +2,10 @@
_ALLOWED_RESOURCE_TYPES = ["memory", "disk", "ephemeral-storage", "cpu", "gpu"]
+# Any domain name is (syntactically) valid if it's a dot-separated list of
+# identifiers, each no longer than 63 characters, and made up of letters,
+# digits and dashes (no underscores).
+_GPU_VENDOR_REGEX_STR = r"^[a-zA-Z\d-]{,63}(\.[a-zA-Z\d-]{,63})*/gpu$"
def _is_numeric(n):
@@ -57,7 +61,14 @@
_valid_mem_spec(v)
elif k == "cpu":
_valid_cpu_spec(v)
- elif k == "gpu":
+ elif "gpu" in k:
+ if k == "gpu":
+ k = "nvidia.com/gpu"
+ elif not re.compile(_GPU_VENDOR_REGEX_STR).match(k):
+ raise ValueError(
+ "gpu resource name does not have a valid vendor name: %s"
+ % k
+ )
_valid_gpu_spec(v)
else:
raise ValueError(
|
{"golden_diff": "diff --git a/elasticdl/python/elasticdl/common/k8s_utils.py b/elasticdl/python/elasticdl/common/k8s_utils.py\n--- a/elasticdl/python/elasticdl/common/k8s_utils.py\n+++ b/elasticdl/python/elasticdl/common/k8s_utils.py\n@@ -2,6 +2,10 @@\n \n \n _ALLOWED_RESOURCE_TYPES = [\"memory\", \"disk\", \"ephemeral-storage\", \"cpu\", \"gpu\"]\n+# Any domain name is (syntactically) valid if it's a dot-separated list of\n+# identifiers, each no longer than 63 characters, and made up of letters,\n+# digits and dashes (no underscores).\n+_GPU_VENDOR_REGEX_STR = r\"^[a-zA-Z\\d-]{,63}(\\.[a-zA-Z\\d-]{,63})*/gpu$\"\n \n \n def _is_numeric(n):\n@@ -57,7 +61,14 @@\n _valid_mem_spec(v)\n elif k == \"cpu\":\n _valid_cpu_spec(v)\n- elif k == \"gpu\":\n+ elif \"gpu\" in k:\n+ if k == \"gpu\":\n+ k = \"nvidia.com/gpu\"\n+ elif not re.compile(_GPU_VENDOR_REGEX_STR).match(k):\n+ raise ValueError(\n+ \"gpu resource name does not have a valid vendor name: %s\"\n+ % k\n+ )\n _valid_gpu_spec(v)\n else:\n raise ValueError(\n", "issue": "Provide default gpu resource name and validation\nCurrently users can only pass \"gpu\" as part of the resource name. However, k8s requires it to be either `\"nvidia.com/gpu\"` or `\"amd.com/gpu\"` if AMD plugin is enabled. There are other different vendors as well but a pattern to use for validation would be `\"<vendor>.com/gpu\"`.\r\n\r\nWe should consider adding `\"nvidia.com/gpu\"` as the default and validate for user provided gpu resource name based on the pattern `\"<vendor>.com/gpu\"`.\r\n\r\nReference: https://kubernetes.io/docs/tasks/manage-gpus/scheduling-gpus/\n", "before_files": [{"content": "import re\n\n\n_ALLOWED_RESOURCE_TYPES = [\"memory\", \"disk\", \"ephemeral-storage\", \"cpu\", \"gpu\"]\n\n\ndef _is_numeric(n):\n try:\n float(n)\n except ValueError:\n return False\n return True\n\n\ndef _valid_gpu_spec(gpu_str):\n if not gpu_str.isnumeric():\n raise ValueError(\"invalid gpu request spec: \" + gpu_str)\n return gpu_str\n\n\ndef _valid_cpu_spec(cpu_str):\n regexp = re.compile(\"([1-9]{1})([0-9]*)m$\")\n if not regexp.match(cpu_str) and not _is_numeric(cpu_str):\n raise ValueError(\"invalid cpu request spec: \" + cpu_str)\n return cpu_str\n\n\ndef _valid_mem_spec(mem_str):\n regexp = re.compile(\"([1-9]{1})([0-9]*)(E|P|T|G|M|K|Ei|Pi|Ti|Gi|Mi|Ki)$\")\n if not regexp.match(mem_str):\n raise ValueError(\"invalid memory request spec: \" + mem_str)\n return mem_str\n\n\ndef parse_resource(resource_str):\n \"\"\"Parse combined k8s resource string into a dict.\n\n Args:\n resource_str: The string representation for k8s resource,\n e.g. \"cpu=250m,memory=32Mi,disk=64Mi,gpu=1,ephemeral-storage=32Mi\".\n\n Return:\n A Python dictionary parsed from the given resource string.\n \"\"\"\n kvs = resource_str.split(\",\")\n resource_names = []\n parsed_res_dict = {}\n for kv in kvs:\n k, v = kv.split(\"=\")\n if k not in resource_names:\n resource_names.append(k)\n else:\n raise ValueError(\n \"The resource string contains duplicate resource names: %s\" % k\n )\n if k in [\"memory\", \"disk\", \"ephemeral-storage\"]:\n _valid_mem_spec(v)\n elif k == \"cpu\":\n _valid_cpu_spec(v)\n elif k == \"gpu\":\n _valid_gpu_spec(v)\n else:\n raise ValueError(\n \"%s is not in the allowed list of resource types: %s\"\n % (k, _ALLOWED_RESOURCE_TYPES)\n )\n parsed_res_dict[k] = v\n return parsed_res_dict\n", "path": "elasticdl/python/elasticdl/common/k8s_utils.py"}]}
| 1,316 | 318 |
gh_patches_debug_778
|
rasdani/github-patches
|
git_diff
|
microsoft__DeepSpeed-2611
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] pydantic DeepSpeedConfigModel has no validator for <class:object>
**Describe the bug**
During ```from deepspeed.inference.config import DtypeEnum```, the following error is raised:
```
File "pydantic/main.py", line 299, in pydantic.main.ModelMetaclass.__new__
File "pydantic/fields.py", line 411, in pydantic.fields.ModelField.infer
File "pydantic/fields.py", line 342, in pydantic.fields.ModelField.__init__
File "pydantic/fields.py", line 456, in pydantic.fields.ModelField.prepare
File "pydantic/fields.py", line 670, in pydantic.fields.ModelField.populate_validators
File "pydantic/validators.py", line 715, in find_validators
RuntimeError: no validator found for <class 'object'>, see `arbitrary_types_allowed` in Config
```
**To Reproduce**
Steps to reproduce the behavior:
1. Simple inference script to reproduce ```from deepspeed.inference.config import DtypeEnum```
2. pydantic 1.8.2, deepspeed 0.8.0+384f17b
**Expected behavior**
successful import with no error
**ds_report output**
cannot produce due to the same import error
**System info (please complete the following information):**
- OS: Red Hat Enterprise Linux Server 7.9 (Maipo)
- GPU count and types: one machine with 8 A100s, three machines with 8 A100s each
- Hugging Face Transformers 4.19.2, no accelerate
- Python version 3.8.13
</issue>
<code>
[start of deepspeed/runtime/config_utils.py]
1 """
2 Copyright (c) Microsoft Corporation
3 Licensed under the MIT license.
4 """
5 """
6 Collection of DeepSpeed configuration utilities
7 """
8 import json
9 import collections
10 import collections.abc
11 from functools import reduce
12 from pydantic import BaseModel
13 from deepspeed.utils import logger
14
15
16 class DeepSpeedConfigModel(BaseModel):
17 """
18 This class should be used as a base for all DeepSpeed configs. It extends
19 pydantic.BaseModel to allow for deprecated fields. To enable this feature,
20 add deprecated=True to pydantic.Field:
21
22 my_dep_field: int = Field(0, deprecated=True)
23
24 Deprecated Field kwargs:
25 - deprecated: [True|False], default False
26 Enables / Disables deprecated fields
27 - new_param: str, default ""
28 Name of the field replacing the deprecated field
29 - set_new_param: [True|False], default True
30 If new_param is provided, enables setting the value of that param with
31 deprecated field value
32 - new_param_fn: callable, default (lambda x: x)
33 If new_param is provided and set_new_param is True, this function will
34 modify the value of the deprecated field before placing that value in
35 the new_param field
36
37 Example:
38 my_new_field is replacing a deprecated my_old_field. The expected type
39 for my_new_field is int while the expected type for my_old_field is
40 str. We want to maintain backward compatibility with our configs, so we
41 define the fields with:
42
43 class MyExampleConfig(DeepSpeedConfigModel):
44 my_new_field: int = 0
45 my_old_field: str = Field('0',
46 deprecated=True,
47 new_param='my_new_field',
48 new_param_fn=(lambda x: int(x)))
49 """
50 def __init__(self, strict=False, **data):
51 if (
52 not strict
53 ): # This is temporary until we refactor all DS configs, allows HF to load models
54 data = {k: v for k, v in data.items() if v != "auto"}
55 super().__init__(**data)
56 self._deprecated_fields_check(self)
57
58 def _process_deprecated_field(self, pydantic_config, field):
59 # Get information about the deprecated field
60 fields_set = pydantic_config.__fields_set__
61 dep_param = field.name
62 kwargs = field.field_info.extra
63 new_param_fn = kwargs.get("new_param_fn", lambda x: x)
64 param_value = new_param_fn(getattr(pydantic_config, dep_param))
65 new_param = kwargs.get("new_param", "")
66 if dep_param in fields_set:
67 logger.warning(f"Config parameter {dep_param} is deprecated" +
68 (f" use {new_param} instead" if new_param else ""))
69 # Check if there is a new param and if it should be set with a value
70 if new_param and kwargs.get("set_new_param", True):
71 # Remove the deprecate field if there is a replacing field
72 try:
73 delattr(pydantic_config, dep_param)
74 except Exception as e:
75 logger.error(f"Tried removing deprecated '{dep_param}' from config")
76 raise e
77
78 # Set new param value
79 new_param_nested = new_param.split(".")
80 if len(new_param_nested) > 1:
81 # If the new param exists in a subconfig, we need to get
82 # the fields set for that subconfig
83 pydantic_config = reduce(getattr,
84 new_param_nested[:-1],
85 pydantic_config)
86 fields_set = pydantic_config.__fields_set__
87 new_param_name = new_param_nested[-1]
88 assert (
89 new_param_name not in fields_set
90 ), f"Cannot provide deprecated parameter '{dep_param}' and replacing parameter '{new_param}' together"
91 # A custom function for converting the old param value to new param value can be provided
92 try:
93 setattr(pydantic_config, new_param_name, param_value)
94 except Exception as e:
95 logger.error(
96 f"Tried setting value for '{new_param}' with value from deprecated '{dep_param}'"
97 )
98 raise e
99
100 def _deprecated_fields_check(self, pydantic_config):
101 fields = pydantic_config.__fields__
102 for field in fields.values():
103 if field.field_info.extra.get("deprecated", False):
104 self._process_deprecated_field(pydantic_config, field)
105
106 class Config:
107 validate_all = True
108 validate_assignment = True
109 use_enum_values = True
110 allow_population_by_field_name = True
111 extra = "forbid"
112
113
114 class pp_int(int):
115 """
116 A wrapper for integers that will return a custom string or comma-formatted
117 string of the integer. For example, print(pp_int(1e5)) will return
118 "10,000". This is useful mainly for auto-generated documentation purposes.
119 """
120 def __new__(cls, val, custom_print_str=None):
121 inst = super().__new__(cls, val)
122 inst.custom_print_str = custom_print_str
123 return inst
124
125 def __repr__(self):
126 if self.custom_print_str:
127 return self.custom_print_str
128 return f"{self.real:,}"
129
130
131 # adapted from https://stackoverflow.com/a/50701137/9201239
132 class ScientificNotationEncoder(json.JSONEncoder):
133 """
134 This class overrides ``json.dumps`` default formatter.
135
136 This version keeps everything as normal except formats numbers bigger than 1e3 using scientific notation.
137
138 Just pass ``cls=ScientificNotationEncoder`` to ``json.dumps`` to activate it
139
140 """
141 def iterencode(self, o, _one_shot=False, level=0):
142 indent = self.indent if self.indent is not None else 4
143 prefix_close = " " * level * indent
144 level += 1
145 prefix = " " * level * indent
146 if isinstance(o, bool):
147 return "true" if o else "false"
148 elif isinstance(o, float) or isinstance(o, int):
149 if o > 1e3:
150 return f"{o:e}"
151 else:
152 return f"{o}"
153 elif isinstance(o, collections.abc.Mapping):
154 x = [
155 f'\n{prefix}"{k}": {self.iterencode(v, level=level)}' for k,
156 v in o.items()
157 ]
158 return "{" + ", ".join(x) + f"\n{prefix_close}" + "}"
159 elif isinstance(o, collections.abc.Sequence) and not isinstance(o, str):
160 return f"[{ f', '.join(map(self.iterencode, o)) }]"
161 return "\n, ".join(super().iterencode(o, _one_shot))
162
163
164 class DeepSpeedConfigObject(object):
165 """
166 For json serialization
167 """
168 def repr(self):
169 return self.__dict__
170
171 def __repr__(self):
172 return json.dumps(
173 self.__dict__,
174 sort_keys=True,
175 indent=4,
176 cls=ScientificNotationEncoder,
177 )
178
179
180 def get_scalar_param(param_dict, param_name, param_default_value):
181 return param_dict.get(param_name, param_default_value)
182
183
184 def get_list_param(param_dict, param_name, param_default_value):
185 return param_dict.get(param_name, param_default_value)
186
187
188 def get_dict_param(param_dict, param_name, param_default_value):
189 return param_dict.get(param_name, param_default_value)
190
191
192 def dict_raise_error_on_duplicate_keys(ordered_pairs):
193 """Reject duplicate keys."""
194 d = dict((k, v) for k, v in ordered_pairs)
195 if len(d) != len(ordered_pairs):
196 counter = collections.Counter([pair[0] for pair in ordered_pairs])
197 keys = [key for key, value in counter.items() if value > 1]
198 raise ValueError("Duplicate keys in DeepSpeed config: {}".format(keys))
199 return d
200
[end of deepspeed/runtime/config_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/deepspeed/runtime/config_utils.py b/deepspeed/runtime/config_utils.py
--- a/deepspeed/runtime/config_utils.py
+++ b/deepspeed/runtime/config_utils.py
@@ -109,6 +109,7 @@
use_enum_values = True
allow_population_by_field_name = True
extra = "forbid"
+ arbitrary_types_allowed = True
class pp_int(int):
|
{"golden_diff": "diff --git a/deepspeed/runtime/config_utils.py b/deepspeed/runtime/config_utils.py\n--- a/deepspeed/runtime/config_utils.py\n+++ b/deepspeed/runtime/config_utils.py\n@@ -109,6 +109,7 @@\n use_enum_values = True\n allow_population_by_field_name = True\n extra = \"forbid\"\n+ arbitrary_types_allowed = True\n \n \n class pp_int(int):\n", "issue": "[BUG] pydantic DeepSpeedConfigModel has no validator for <class:object>\n**Describe the bug**\r\nDuring ```from deepspeed.inference.config import DtypeEnum```, got error \r\n```\r\n File \"pydantic/main.py\", line 299, in pydantic.main.ModelMetaclass.__new__\r\n File \"pydantic/fields.py\", line 411, in pydantic.fields.ModelField.infer\r\n File \"pydantic/fields.py\", line 342, in pydantic.fields.ModelField.__init__\r\n File \"pydantic/fields.py\", line 456, in pydantic.fields.ModelField.prepare\r\n File \"pydantic/fields.py\", line 670, in pydantic.fields.ModelField.populate_validators\r\n File \"pydantic/validators.py\", line 715, in find_validators\r\nRuntimeError: no validator found for <class 'object'>, see `arbitrary_types_allowed` in Config\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Simple inference script to reproduce ```from deepspeed.inference.config import DtypeEnum```\r\n2. pydantic 1.8.2, deepspeed 0.8.0+384f17b\r\n\r\n\r\n**Expected behavior**\r\nsuccessful import with no error\r\n\r\n**ds_report output**\r\ncannot produce due to the same import error\r\n\r\n**System info (please complete the following information):**\r\n - OS: Red Hat Enterprise Linux Server 7.9 (Maipo)\r\n - GPU count and types: one machine with 8 A100s, three machines with 8 A100s each\r\n - Hugging Face Transformers 4.19.2, no accelerate\r\n - Python version 3.8.13\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright (c) Microsoft Corporation\nLicensed under the MIT license.\n\"\"\"\n\"\"\"\nCollection of DeepSpeed configuration utilities\n\"\"\"\nimport json\nimport collections\nimport collections.abc\nfrom functools import reduce\nfrom pydantic import BaseModel\nfrom deepspeed.utils import logger\n\n\nclass DeepSpeedConfigModel(BaseModel):\n \"\"\"\n This class should be used as a base for all DeepSpeed configs. It extends\n pydantic.BaseModel to allow for deprecated fields. To enable this feature,\n add deprecated=True to pydantic.Field:\n\n my_dep_field: int = Field(0, deprecated=True)\n\n Deprecated Field kwargs:\n - deprecated: [True|False], default False\n Enables / Disables deprecated fields\n - new_param: str, default \"\"\n Name of the field replacing the deprecated field\n - set_new_param: [True|False], default True\n If new_param is provided, enables setting the value of that param with\n deprecated field value\n - new_param_fn: callable, default (lambda x: x)\n If new_param is provided and set_new_param is True, this function will\n modify the value of the deprecated field before placing that value in\n the new_param field\n\n Example:\n my_new_field is replacing a deprecated my_old_field. The expected type\n for my_new_field is int while the expected type for my_old_field is\n str. 
We want to maintain backward compatibility with our configs, so we\n define the fields with:\n\n class MyExampleConfig(DeepSpeedConfigModel):\n my_new_field: int = 0\n my_old_field: str = Field('0',\n deprecated=True,\n new_param='my_new_field',\n new_param_fn=(lambda x: int(x)))\n \"\"\"\n def __init__(self, strict=False, **data):\n if (\n not strict\n ): # This is temporary until we refactor all DS configs, allows HF to load models\n data = {k: v for k, v in data.items() if v != \"auto\"}\n super().__init__(**data)\n self._deprecated_fields_check(self)\n\n def _process_deprecated_field(self, pydantic_config, field):\n # Get information about the deprecated field\n fields_set = pydantic_config.__fields_set__\n dep_param = field.name\n kwargs = field.field_info.extra\n new_param_fn = kwargs.get(\"new_param_fn\", lambda x: x)\n param_value = new_param_fn(getattr(pydantic_config, dep_param))\n new_param = kwargs.get(\"new_param\", \"\")\n if dep_param in fields_set:\n logger.warning(f\"Config parameter {dep_param} is deprecated\" +\n (f\" use {new_param} instead\" if new_param else \"\"))\n # Check if there is a new param and if it should be set with a value\n if new_param and kwargs.get(\"set_new_param\", True):\n # Remove the deprecate field if there is a replacing field\n try:\n delattr(pydantic_config, dep_param)\n except Exception as e:\n logger.error(f\"Tried removing deprecated '{dep_param}' from config\")\n raise e\n\n # Set new param value\n new_param_nested = new_param.split(\".\")\n if len(new_param_nested) > 1:\n # If the new param exists in a subconfig, we need to get\n # the fields set for that subconfig\n pydantic_config = reduce(getattr,\n new_param_nested[:-1],\n pydantic_config)\n fields_set = pydantic_config.__fields_set__\n new_param_name = new_param_nested[-1]\n assert (\n new_param_name not in fields_set\n ), f\"Cannot provide deprecated parameter '{dep_param}' and replacing parameter '{new_param}' together\"\n # A custom function for converting the old param value to new param value can be provided\n try:\n setattr(pydantic_config, new_param_name, param_value)\n except Exception as e:\n logger.error(\n f\"Tried setting value for '{new_param}' with value from deprecated '{dep_param}'\"\n )\n raise e\n\n def _deprecated_fields_check(self, pydantic_config):\n fields = pydantic_config.__fields__\n for field in fields.values():\n if field.field_info.extra.get(\"deprecated\", False):\n self._process_deprecated_field(pydantic_config, field)\n\n class Config:\n validate_all = True\n validate_assignment = True\n use_enum_values = True\n allow_population_by_field_name = True\n extra = \"forbid\"\n\n\nclass pp_int(int):\n \"\"\"\n A wrapper for integers that will return a custom string or comma-formatted\n string of the integer. For example, print(pp_int(1e5)) will return\n \"10,000\". 
This is useful mainly for auto-generated documentation purposes.\n \"\"\"\n def __new__(cls, val, custom_print_str=None):\n inst = super().__new__(cls, val)\n inst.custom_print_str = custom_print_str\n return inst\n\n def __repr__(self):\n if self.custom_print_str:\n return self.custom_print_str\n return f\"{self.real:,}\"\n\n\n# adapted from https://stackoverflow.com/a/50701137/9201239\nclass ScientificNotationEncoder(json.JSONEncoder):\n \"\"\"\n This class overrides ``json.dumps`` default formatter.\n\n This version keeps everything as normal except formats numbers bigger than 1e3 using scientific notation.\n\n Just pass ``cls=ScientificNotationEncoder`` to ``json.dumps`` to activate it\n\n \"\"\"\n def iterencode(self, o, _one_shot=False, level=0):\n indent = self.indent if self.indent is not None else 4\n prefix_close = \" \" * level * indent\n level += 1\n prefix = \" \" * level * indent\n if isinstance(o, bool):\n return \"true\" if o else \"false\"\n elif isinstance(o, float) or isinstance(o, int):\n if o > 1e3:\n return f\"{o:e}\"\n else:\n return f\"{o}\"\n elif isinstance(o, collections.abc.Mapping):\n x = [\n f'\\n{prefix}\"{k}\": {self.iterencode(v, level=level)}' for k,\n v in o.items()\n ]\n return \"{\" + \", \".join(x) + f\"\\n{prefix_close}\" + \"}\"\n elif isinstance(o, collections.abc.Sequence) and not isinstance(o, str):\n return f\"[{ f', '.join(map(self.iterencode, o)) }]\"\n return \"\\n, \".join(super().iterencode(o, _one_shot))\n\n\nclass DeepSpeedConfigObject(object):\n \"\"\"\n For json serialization\n \"\"\"\n def repr(self):\n return self.__dict__\n\n def __repr__(self):\n return json.dumps(\n self.__dict__,\n sort_keys=True,\n indent=4,\n cls=ScientificNotationEncoder,\n )\n\n\ndef get_scalar_param(param_dict, param_name, param_default_value):\n return param_dict.get(param_name, param_default_value)\n\n\ndef get_list_param(param_dict, param_name, param_default_value):\n return param_dict.get(param_name, param_default_value)\n\n\ndef get_dict_param(param_dict, param_name, param_default_value):\n return param_dict.get(param_name, param_default_value)\n\n\ndef dict_raise_error_on_duplicate_keys(ordered_pairs):\n \"\"\"Reject duplicate keys.\"\"\"\n d = dict((k, v) for k, v in ordered_pairs)\n if len(d) != len(ordered_pairs):\n counter = collections.Counter([pair[0] for pair in ordered_pairs])\n keys = [key for key, value in counter.items() if value > 1]\n raise ValueError(\"Duplicate keys in DeepSpeed config: {}\".format(keys))\n return d\n", "path": "deepspeed/runtime/config_utils.py"}]}
| 3,111 | 91 |
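The one-line `arbitrary_types_allowed = True` fix in the patch above is easy to exercise outside DeepSpeed. The sketch below only illustrates the underlying pydantic v1 behaviour; the `EngineHandle` class and the `handle` field are invented for the example and are not part of DeepSpeed:

```python
from pydantic import BaseModel


class EngineHandle:
    """Stand-in for an arbitrary, non-pydantic type referenced from a config model."""
    pass


try:
    class Broken(BaseModel):
        handle: EngineHandle = None  # pydantic v1 has no built-in validator for arbitrary classes
except RuntimeError as exc:
    # "no validator found for <class '...EngineHandle'>, see `arbitrary_types_allowed` in Config"
    print(exc)


class Fixed(BaseModel):
    handle: EngineHandle = None

    class Config:
        arbitrary_types_allowed = True  # the same switch the patch adds to DeepSpeedConfigModel.Config


print(Fixed())  # now validated with a plain isinstance() check instead of a dedicated validator
```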
gh_patches_debug_43770
|
rasdani/github-patches
|
git_diff
|
beetbox__beets-1384
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
duplicates: Do not report duplicates when only `mb_albumid` is duplicated
```
$ beet duplicates -F
/home/simon/Música/2 Unlimited/Get Ready/03 - The Magic Friend.mp3
/home/simon/Música/2 Unlimited/Get Ready/04 - Contrast.mp3
/home/simon/Música/2 Unlimited/Get Ready/05 - Desire.mp3
/home/simon/Música/2 Unlimited/Get Ready/06 - Pacific Walk.mp3
/home/simon/Música/2 Unlimited/Get Ready/07 - Workaholic.mp3
/home/simon/Música/2 Unlimited/Get Ready/08 - Rougher Than The Average.mp3
/home/simon/Música/2 Unlimited/Get Ready/09 - Delight.mp3
/home/simon/Música/2 Unlimited/Get Ready/10 - Eternally Yours.mp3
/home/simon/Música/2 Unlimited/Get Ready/11 - Twilight Zone (Rave Version Edit).mp3
/home/simon/Música/2 Unlimited/Get Ready/12 - Get Ready For This (Vocal Version).mp3
/home/simon/Música/2 Unlimited/Get Ready/13 - The Magic Friend (Instrumental).mp3
/home/simon/Música/2 Unlimited/Get Ready/14 - Workaholic (Instrumental).mp3
```
```
$ ll Música/2\ Unlimited/Get\ Ready/
total 83M
-rw-rw-r-- 1 simon simon 7,5M oct 10 16:00 03 - The Magic Friend.mp3
-rw-rw-r-- 1 simon simon 6,4M oct 10 16:00 04 - Contrast.mp3
-rw-rw-r-- 1 simon simon 7,9M oct 10 16:00 05 - Desire.mp3
-rw-rw-r-- 1 simon simon 5,7M oct 10 16:00 06 - Pacific Walk.mp3
-rw-rw-r-- 1 simon simon 7,2M oct 10 16:00 07 - Workaholic.mp3
-rw-rw-r-- 1 simon simon 7,6M oct 10 16:00 08 - Rougher Than The Average.mp3
-rw-rw-r-- 1 simon simon 6,5M oct 10 16:00 09 - Delight.mp3
-rw-rw-r-- 1 simon simon 7,6M oct 10 16:00 10 - Eternally Yours.mp3
-rw-rw-r-- 1 simon simon 7,1M oct 10 16:00 11 - Twilight Zone (Rave Version Edit).mp3
-rw-rw-r-- 1 simon simon 6,4M oct 10 16:00 12 - Get Ready For This (Vocal Version).mp3
-rw-rw-r-- 1 simon simon 5,9M oct 10 16:00 13 - The Magic Friend (Instrumental).mp3
-rw-rw-r-- 1 simon simon 7,2M oct 10 16:00 14 - Workaholic (Instrumental).mp3
-rw-rw-r-- 1 simon simon 30K oct 10 16:00 cover.jpg
```
What is the duplicate of that?
</issue>
<code>
[start of beetsplug/duplicates.py]
1 # This file is part of beets.
2 # Copyright 2015, Pedro Silva.
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining
5 # a copy of this software and associated documentation files (the
6 # "Software"), to deal in the Software without restriction, including
7 # without limitation the rights to use, copy, modify, merge, publish,
8 # distribute, sublicense, and/or sell copies of the Software, and to
9 # permit persons to whom the Software is furnished to do so, subject to
10 # the following conditions:
11 #
12 # The above copyright notice and this permission notice shall be
13 # included in all copies or substantial portions of the Software.
14
15 """List duplicate tracks or albums.
16 """
17 from __future__ import (division, absolute_import, print_function,
18 unicode_literals)
19
20 import shlex
21
22 from beets.plugins import BeetsPlugin
23 from beets.ui import decargs, print_, vararg_callback, Subcommand, UserError
24 from beets.util import command_output, displayable_path, subprocess
25
26 PLUGIN = 'duplicates'
27
28
29 def _process_item(item, lib, copy=False, move=False, delete=False,
30 tag=False, fmt=''):
31 """Process Item `item` in `lib`.
32 """
33 if copy:
34 item.move(basedir=copy, copy=True)
35 item.store()
36 if move:
37 item.move(basedir=move, copy=False)
38 item.store()
39 if delete:
40 item.remove(delete=True)
41 if tag:
42 try:
43 k, v = tag.split('=')
44 except:
45 raise UserError('%s: can\'t parse k=v tag: %s' % (PLUGIN, tag))
46 setattr(k, v)
47 item.store()
48 print_(format(item, fmt))
49
50
51 def _checksum(item, prog, log):
52 """Run external `prog` on file path associated with `item`, cache
53 output as flexattr on a key that is the name of the program, and
54 return the key, checksum tuple.
55 """
56 args = [p.format(file=item.path) for p in shlex.split(prog)]
57 key = args[0]
58 checksum = getattr(item, key, False)
59 if not checksum:
60 log.debug(u'{0}: key {1} on item {2} not cached: computing checksum',
61 PLUGIN, key, displayable_path(item.path))
62 try:
63 checksum = command_output(args)
64 setattr(item, key, checksum)
65 item.store()
66 log.debug(u'{0}: computed checksum for {1} using {2}',
67 PLUGIN, item.title, key)
68 except subprocess.CalledProcessError as e:
69 log.debug(u'{0}: failed to checksum {1}: {2}',
70 PLUGIN, displayable_path(item.path), e)
71 else:
72 log.debug(u'{0}: key {1} on item {2} cached: not computing checksum',
73 PLUGIN, key, displayable_path(item.path))
74 return key, checksum
75
76
77 def _group_by(objs, keys, log):
78 """Return a dictionary with keys arbitrary concatenations of attributes and
79 values lists of objects (Albums or Items) with those keys.
80 """
81 import collections
82 counts = collections.defaultdict(list)
83 for obj in objs:
84 values = [getattr(obj, k, None) for k in keys]
85 values = [v for v in values if v not in (None, '')]
86 if values:
87 key = '\001'.join(values)
88 counts[key].append(obj)
89 else:
90 log.debug(u'{0}: all keys {1} on item {2} are null: skipping',
91 PLUGIN, keys, displayable_path(obj.path))
92
93 return counts
94
95
96 def _duplicates(objs, keys, full, log):
97 """Generate triples of keys, duplicate counts, and constituent objects.
98 """
99 offset = 0 if full else 1
100 for k, objs in _group_by(objs, keys, log).iteritems():
101 if len(objs) > 1:
102 yield (k, len(objs) - offset, objs[offset:])
103
104
105 class DuplicatesPlugin(BeetsPlugin):
106 """List duplicate tracks or albums
107 """
108 def __init__(self):
109 super(DuplicatesPlugin, self).__init__()
110
111 self.config.add({
112 'format': '',
113 'count': False,
114 'album': False,
115 'full': False,
116 'path': False,
117 'keys': ['mb_trackid', 'mb_albumid'],
118 'checksum': None,
119 'copy': False,
120 'move': False,
121 'delete': False,
122 'tag': False,
123 })
124
125 self._command = Subcommand('duplicates',
126 help=__doc__,
127 aliases=['dup'])
128 self._command.parser.add_option('-c', '--count', dest='count',
129 action='store_true',
130 help='show duplicate counts')
131
132 self._command.parser.add_option('-C', '--checksum', dest='checksum',
133 action='store', metavar='PROG',
134 help='report duplicates based on'
135 ' arbitrary command')
136
137 self._command.parser.add_option('-d', '--delete', dest='delete',
138 action='store_true',
139 help='delete items from library and '
140 'disk')
141
142 self._command.parser.add_option('-F', '--full', dest='full',
143 action='store_true',
144 help='show all versions of duplicate'
145 ' tracks or albums')
146
147 self._command.parser.add_option('-k', '--keys', dest='keys',
148 action='callback', metavar='KEY1 KEY2',
149 callback=vararg_callback,
150 help='report duplicates based on keys')
151
152 self._command.parser.add_option('-m', '--move', dest='move',
153 action='store', metavar='DEST',
154 help='move items to dest')
155
156 self._command.parser.add_option('-o', '--copy', dest='copy',
157 action='store', metavar='DEST',
158 help='copy items to dest')
159
160 self._command.parser.add_option('-t', '--tag', dest='tag',
161 action='store',
162 help='tag matched items with \'k=v\''
163 ' attribute')
164 self._command.parser.add_all_common_options()
165
166 def commands(self):
167
168 def _dup(lib, opts, args):
169 self.config.set_args(opts)
170 fmt = self.config['format'].get()
171 album = self.config['album'].get(bool)
172 full = self.config['full'].get(bool)
173 keys = self.config['keys'].get()
174 checksum = self.config['checksum'].get()
175 copy = self.config['copy'].get()
176 move = self.config['move'].get()
177 delete = self.config['delete'].get(bool)
178 tag = self.config['tag'].get()
179
180 if album:
181 keys = ['mb_albumid']
182 items = lib.albums(decargs(args))
183 else:
184 items = lib.items(decargs(args))
185
186 if self.config['path']:
187 fmt = '$path'
188
189 # Default format string for count mode.
190 if self.config['count'] and not fmt:
191 if album:
192 fmt = '$albumartist - $album'
193 else:
194 fmt = '$albumartist - $album - $title'
195 fmt += ': {0}'
196
197 if checksum:
198 if not isinstance(checksum, basestring):
199 raise UserError(
200 'duplicates: "checksum" option must be a command'
201 )
202 for i in items:
203 k, _ = self._checksum(i, checksum, self._log)
204 keys = [k]
205
206 for obj_id, obj_count, objs in _duplicates(items,
207 keys=keys,
208 full=full,
209 log=self._log):
210 if obj_id: # Skip empty IDs.
211 for o in objs:
212 _process_item(o, lib,
213 copy=copy,
214 move=move,
215 delete=delete,
216 tag=tag,
217 fmt=fmt.format(obj_count))
218
219 self._command.func = _dup
220 return [self._command]
221
[end of beetsplug/duplicates.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/beetsplug/duplicates.py b/beetsplug/duplicates.py
--- a/beetsplug/duplicates.py
+++ b/beetsplug/duplicates.py
@@ -74,30 +74,37 @@
return key, checksum
-def _group_by(objs, keys, log):
+def _group_by(objs, keys, strict, log):
"""Return a dictionary with keys arbitrary concatenations of attributes and
values lists of objects (Albums or Items) with those keys.
+
+ If strict, all attributes must be defined for a duplicate match.
"""
import collections
counts = collections.defaultdict(list)
for obj in objs:
values = [getattr(obj, k, None) for k in keys]
values = [v for v in values if v not in (None, '')]
- if values:
+ if strict and len(values) < len(keys):
+ log.debug(u'{0}: some keys {1} on item {2} are null or empty: '
+ 'skipping',
+ PLUGIN, keys, displayable_path(obj.path))
+ elif (not strict and not len(values)):
+ log.debug(u'{0}: all keys {1} on item {2} are null or empty: '
+ 'skipping',
+ PLUGIN, keys, displayable_path(obj.path))
+ else:
key = '\001'.join(values)
counts[key].append(obj)
- else:
- log.debug(u'{0}: all keys {1} on item {2} are null: skipping',
- PLUGIN, keys, displayable_path(obj.path))
return counts
-def _duplicates(objs, keys, full, log):
+def _duplicates(objs, keys, full, strict, log):
"""Generate triples of keys, duplicate counts, and constituent objects.
"""
offset = 0 if full else 1
- for k, objs in _group_by(objs, keys, log).iteritems():
+ for k, objs in _group_by(objs, keys, strict, log).iteritems():
if len(objs) > 1:
yield (k, len(objs) - offset, objs[offset:])
@@ -113,6 +120,7 @@
'count': False,
'album': False,
'full': False,
+ 'strict': False,
'path': False,
'keys': ['mb_trackid', 'mb_albumid'],
'checksum': None,
@@ -144,6 +152,11 @@
help='show all versions of duplicate'
' tracks or albums')
+ self._command.parser.add_option('-s', '--strict', dest='strict',
+ action='store_true',
+ help='report duplicates only if all'
+ ' attributes are set')
+
self._command.parser.add_option('-k', '--keys', dest='keys',
action='callback', metavar='KEY1 KEY2',
callback=vararg_callback,
@@ -170,6 +183,7 @@
fmt = self.config['format'].get()
album = self.config['album'].get(bool)
full = self.config['full'].get(bool)
+ strict = self.config['strict'].get(bool)
keys = self.config['keys'].get()
checksum = self.config['checksum'].get()
copy = self.config['copy'].get()
@@ -206,6 +220,7 @@
for obj_id, obj_count, objs in _duplicates(items,
keys=keys,
full=full,
+ strict=strict,
log=self._log):
if obj_id: # Skip empty IDs.
for o in objs:
|
{"golden_diff": "diff --git a/beetsplug/duplicates.py b/beetsplug/duplicates.py\n--- a/beetsplug/duplicates.py\n+++ b/beetsplug/duplicates.py\n@@ -74,30 +74,37 @@\n return key, checksum\n \n \n-def _group_by(objs, keys, log):\n+def _group_by(objs, keys, strict, log):\n \"\"\"Return a dictionary with keys arbitrary concatenations of attributes and\n values lists of objects (Albums or Items) with those keys.\n+\n+ If strict, all attributes must be defined for a duplicate match.\n \"\"\"\n import collections\n counts = collections.defaultdict(list)\n for obj in objs:\n values = [getattr(obj, k, None) for k in keys]\n values = [v for v in values if v not in (None, '')]\n- if values:\n+ if strict and len(values) < len(keys):\n+ log.debug(u'{0}: some keys {1} on item {2} are null or empty: '\n+ 'skipping',\n+ PLUGIN, keys, displayable_path(obj.path))\n+ elif (not strict and not len(values)):\n+ log.debug(u'{0}: all keys {1} on item {2} are null or empty: '\n+ 'skipping',\n+ PLUGIN, keys, displayable_path(obj.path))\n+ else:\n key = '\\001'.join(values)\n counts[key].append(obj)\n- else:\n- log.debug(u'{0}: all keys {1} on item {2} are null: skipping',\n- PLUGIN, keys, displayable_path(obj.path))\n \n return counts\n \n \n-def _duplicates(objs, keys, full, log):\n+def _duplicates(objs, keys, full, strict, log):\n \"\"\"Generate triples of keys, duplicate counts, and constituent objects.\n \"\"\"\n offset = 0 if full else 1\n- for k, objs in _group_by(objs, keys, log).iteritems():\n+ for k, objs in _group_by(objs, keys, strict, log).iteritems():\n if len(objs) > 1:\n yield (k, len(objs) - offset, objs[offset:])\n \n@@ -113,6 +120,7 @@\n 'count': False,\n 'album': False,\n 'full': False,\n+ 'strict': False,\n 'path': False,\n 'keys': ['mb_trackid', 'mb_albumid'],\n 'checksum': None,\n@@ -144,6 +152,11 @@\n help='show all versions of duplicate'\n ' tracks or albums')\n \n+ self._command.parser.add_option('-s', '--strict', dest='strict',\n+ action='store_true',\n+ help='report duplicates only if all'\n+ ' attributes are set')\n+\n self._command.parser.add_option('-k', '--keys', dest='keys',\n action='callback', metavar='KEY1 KEY2',\n callback=vararg_callback,\n@@ -170,6 +183,7 @@\n fmt = self.config['format'].get()\n album = self.config['album'].get(bool)\n full = self.config['full'].get(bool)\n+ strict = self.config['strict'].get(bool)\n keys = self.config['keys'].get()\n checksum = self.config['checksum'].get()\n copy = self.config['copy'].get()\n@@ -206,6 +220,7 @@\n for obj_id, obj_count, objs in _duplicates(items,\n keys=keys,\n full=full,\n+ strict=strict,\n log=self._log):\n if obj_id: # Skip empty IDs.\n for o in objs:\n", "issue": "duplicates: Do not report duplicates when only `mb_albumid` is duplicated\n```\n$ beet duplicates -F\n/home/simon/M\u00fasica/2 Unlimited/Get Ready/03 - The Magic Friend.mp3\n/home/simon/M\u00fasica/2 Unlimited/Get Ready/04 - Contrast.mp3\n/home/simon/M\u00fasica/2 Unlimited/Get Ready/05 - Desire.mp3\n/home/simon/M\u00fasica/2 Unlimited/Get Ready/06 - Pacific Walk.mp3\n/home/simon/M\u00fasica/2 Unlimited/Get Ready/07 - Workaholic.mp3\n/home/simon/M\u00fasica/2 Unlimited/Get Ready/08 - Rougher Than The Average.mp3\n/home/simon/M\u00fasica/2 Unlimited/Get Ready/09 - Delight.mp3\n/home/simon/M\u00fasica/2 Unlimited/Get Ready/10 - Eternally Yours.mp3\n/home/simon/M\u00fasica/2 Unlimited/Get Ready/11 - Twilight Zone (Rave Version Edit).mp3\n/home/simon/M\u00fasica/2 Unlimited/Get Ready/12 - Get Ready For This (Vocal Version).mp3\n/home/simon/M\u00fasica/2 Unlimited/Get 
Ready/13 - The Magic Friend (Instrumental).mp3\n/home/simon/M\u00fasica/2 Unlimited/Get Ready/14 - Workaholic (Instrumental).mp3\n```\n\n```\n$ ll M\u00fasica/2\\ Unlimited/Get\\ Ready/\ntotal 83M\n-rw-rw-r-- 1 simon simon 7,5M oct 10 16:00 03 - The Magic Friend.mp3\n-rw-rw-r-- 1 simon simon 6,4M oct 10 16:00 04 - Contrast.mp3\n-rw-rw-r-- 1 simon simon 7,9M oct 10 16:00 05 - Desire.mp3\n-rw-rw-r-- 1 simon simon 5,7M oct 10 16:00 06 - Pacific Walk.mp3\n-rw-rw-r-- 1 simon simon 7,2M oct 10 16:00 07 - Workaholic.mp3\n-rw-rw-r-- 1 simon simon 7,6M oct 10 16:00 08 - Rougher Than The Average.mp3\n-rw-rw-r-- 1 simon simon 6,5M oct 10 16:00 09 - Delight.mp3\n-rw-rw-r-- 1 simon simon 7,6M oct 10 16:00 10 - Eternally Yours.mp3\n-rw-rw-r-- 1 simon simon 7,1M oct 10 16:00 11 - Twilight Zone (Rave Version Edit).mp3\n-rw-rw-r-- 1 simon simon 6,4M oct 10 16:00 12 - Get Ready For This (Vocal Version).mp3\n-rw-rw-r-- 1 simon simon 5,9M oct 10 16:00 13 - The Magic Friend (Instrumental).mp3\n-rw-rw-r-- 1 simon simon 7,2M oct 10 16:00 14 - Workaholic (Instrumental).mp3\n-rw-rw-r-- 1 simon simon 30K oct 10 16:00 cover.jpg\n```\n\nWhat is the duplicate of that?\n\n", "before_files": [{"content": "# This file is part of beets.\n# Copyright 2015, Pedro Silva.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"List duplicate tracks or albums.\n\"\"\"\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\nimport shlex\n\nfrom beets.plugins import BeetsPlugin\nfrom beets.ui import decargs, print_, vararg_callback, Subcommand, UserError\nfrom beets.util import command_output, displayable_path, subprocess\n\nPLUGIN = 'duplicates'\n\n\ndef _process_item(item, lib, copy=False, move=False, delete=False,\n tag=False, fmt=''):\n \"\"\"Process Item `item` in `lib`.\n \"\"\"\n if copy:\n item.move(basedir=copy, copy=True)\n item.store()\n if move:\n item.move(basedir=move, copy=False)\n item.store()\n if delete:\n item.remove(delete=True)\n if tag:\n try:\n k, v = tag.split('=')\n except:\n raise UserError('%s: can\\'t parse k=v tag: %s' % (PLUGIN, tag))\n setattr(k, v)\n item.store()\n print_(format(item, fmt))\n\n\ndef _checksum(item, prog, log):\n \"\"\"Run external `prog` on file path associated with `item`, cache\n output as flexattr on a key that is the name of the program, and\n return the key, checksum tuple.\n \"\"\"\n args = [p.format(file=item.path) for p in shlex.split(prog)]\n key = args[0]\n checksum = getattr(item, key, False)\n if not checksum:\n log.debug(u'{0}: key {1} on item {2} not cached: computing checksum',\n PLUGIN, key, displayable_path(item.path))\n try:\n checksum = command_output(args)\n setattr(item, key, checksum)\n item.store()\n log.debug(u'{0}: computed checksum for {1} using {2}',\n PLUGIN, item.title, key)\n except subprocess.CalledProcessError as e:\n log.debug(u'{0}: failed to checksum {1}: {2}',\n PLUGIN, displayable_path(item.path), e)\n else:\n log.debug(u'{0}: key {1} on item {2} cached: not computing 
checksum',\n PLUGIN, key, displayable_path(item.path))\n return key, checksum\n\n\ndef _group_by(objs, keys, log):\n \"\"\"Return a dictionary with keys arbitrary concatenations of attributes and\n values lists of objects (Albums or Items) with those keys.\n \"\"\"\n import collections\n counts = collections.defaultdict(list)\n for obj in objs:\n values = [getattr(obj, k, None) for k in keys]\n values = [v for v in values if v not in (None, '')]\n if values:\n key = '\\001'.join(values)\n counts[key].append(obj)\n else:\n log.debug(u'{0}: all keys {1} on item {2} are null: skipping',\n PLUGIN, keys, displayable_path(obj.path))\n\n return counts\n\n\ndef _duplicates(objs, keys, full, log):\n \"\"\"Generate triples of keys, duplicate counts, and constituent objects.\n \"\"\"\n offset = 0 if full else 1\n for k, objs in _group_by(objs, keys, log).iteritems():\n if len(objs) > 1:\n yield (k, len(objs) - offset, objs[offset:])\n\n\nclass DuplicatesPlugin(BeetsPlugin):\n \"\"\"List duplicate tracks or albums\n \"\"\"\n def __init__(self):\n super(DuplicatesPlugin, self).__init__()\n\n self.config.add({\n 'format': '',\n 'count': False,\n 'album': False,\n 'full': False,\n 'path': False,\n 'keys': ['mb_trackid', 'mb_albumid'],\n 'checksum': None,\n 'copy': False,\n 'move': False,\n 'delete': False,\n 'tag': False,\n })\n\n self._command = Subcommand('duplicates',\n help=__doc__,\n aliases=['dup'])\n self._command.parser.add_option('-c', '--count', dest='count',\n action='store_true',\n help='show duplicate counts')\n\n self._command.parser.add_option('-C', '--checksum', dest='checksum',\n action='store', metavar='PROG',\n help='report duplicates based on'\n ' arbitrary command')\n\n self._command.parser.add_option('-d', '--delete', dest='delete',\n action='store_true',\n help='delete items from library and '\n 'disk')\n\n self._command.parser.add_option('-F', '--full', dest='full',\n action='store_true',\n help='show all versions of duplicate'\n ' tracks or albums')\n\n self._command.parser.add_option('-k', '--keys', dest='keys',\n action='callback', metavar='KEY1 KEY2',\n callback=vararg_callback,\n help='report duplicates based on keys')\n\n self._command.parser.add_option('-m', '--move', dest='move',\n action='store', metavar='DEST',\n help='move items to dest')\n\n self._command.parser.add_option('-o', '--copy', dest='copy',\n action='store', metavar='DEST',\n help='copy items to dest')\n\n self._command.parser.add_option('-t', '--tag', dest='tag',\n action='store',\n help='tag matched items with \\'k=v\\''\n ' attribute')\n self._command.parser.add_all_common_options()\n\n def commands(self):\n\n def _dup(lib, opts, args):\n self.config.set_args(opts)\n fmt = self.config['format'].get()\n album = self.config['album'].get(bool)\n full = self.config['full'].get(bool)\n keys = self.config['keys'].get()\n checksum = self.config['checksum'].get()\n copy = self.config['copy'].get()\n move = self.config['move'].get()\n delete = self.config['delete'].get(bool)\n tag = self.config['tag'].get()\n\n if album:\n keys = ['mb_albumid']\n items = lib.albums(decargs(args))\n else:\n items = lib.items(decargs(args))\n\n if self.config['path']:\n fmt = '$path'\n\n # Default format string for count mode.\n if self.config['count'] and not fmt:\n if album:\n fmt = '$albumartist - $album'\n else:\n fmt = '$albumartist - $album - $title'\n fmt += ': {0}'\n\n if checksum:\n if not isinstance(checksum, basestring):\n raise UserError(\n 'duplicates: \"checksum\" option must be a command'\n )\n for i in items:\n k, _ 
= self._checksum(i, checksum, self._log)\n keys = [k]\n\n for obj_id, obj_count, objs in _duplicates(items,\n keys=keys,\n full=full,\n log=self._log):\n if obj_id: # Skip empty IDs.\n for o in objs:\n _process_item(o, lib,\n copy=copy,\n move=move,\n delete=delete,\n tag=tag,\n fmt=fmt.format(obj_count))\n\n self._command.func = _dup\n return [self._command]\n", "path": "beetsplug/duplicates.py"}]}
| 3,635 | 815 |
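To make the effect of the new `--strict` option above concrete, here is a simplified, self-contained stand-in for the plugin's `_group_by` that uses plain dicts instead of beets `Item` objects; the track data is invented for illustration:

```python
import collections


def group_by(objs, keys, strict=False):
    """Simplified model of beetsplug.duplicates._group_by using plain dicts."""
    counts = collections.defaultdict(list)
    for obj in objs:
        values = [obj.get(k) for k in keys]
        values = [v for v in values if v not in (None, '')]
        if strict and len(values) < len(keys):
            continue  # strict mode: skip objects missing any of the requested keys
        if not values:
            continue  # nothing usable to group on
        counts['\001'.join(values)].append(obj)
    return counts


tracks = [
    {'title': 'Contrast', 'mb_trackid': '', 'mb_albumid': 'get-ready'},
    {'title': 'Desire', 'mb_trackid': '', 'mb_albumid': 'get-ready'},
]
keys = ['mb_trackid', 'mb_albumid']

# Default behaviour: both tracks collapse onto the shared mb_albumid and look like duplicates.
print({k: len(v) for k, v in group_by(tracks, keys).items()})   # {'get-ready': 2}
# Strict behaviour: tracks lacking mb_trackid are skipped, so nothing is reported.
print(group_by(tracks, keys, strict=True))                      # defaultdict(<class 'list'>, {})
```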
gh_patches_debug_12859
|
rasdani/github-patches
|
git_diff
|
secdev__scapy-2556
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plain_str not consistent between Py2 and Py3
**Python 2 (expected result)**
```python
>>> plain_str(b'\x3e\xb4\xef\x2c\x11')
'>\xb4\xef,\x11'
```
**Python 3 (unexpected result)**
```python
>>> plain_str(b'\x3e\xb4\xef\x2c\x11')
'>,\x11'
```
**Cause** is `errors="ignore"`
```python
def plain_str(x):
"""Convert basic byte objects to str"""
if isinstance(x, bytes):
return x.decode(errors="ignore")
return str(x)
```
It just ignores values that are not in a valid range for the codec.
**Possible fix**
```python
def plain_str(x):
"""Convert basic byte objects to str"""
if isinstance(x, bytes):
return x.decode(errors="backslashreplace")
return str(x)
```
But this breaks compatibility for Python 3.4
</issue>
<code>
[start of scapy/compat.py]
1 # This file is part of Scapy
2 # See http://www.secdev.org/projects/scapy for more information
3 # Copyright (C) Philippe Biondi <[email protected]>
4 # Copyright (C) Gabriel Potter <[email protected]>
5 # This program is published under a GPLv2 license
6
7 """
8 Python 2 and 3 link classes.
9 """
10
11 from __future__ import absolute_import
12 import base64
13 import binascii
14 import gzip
15 import struct
16
17 import scapy.modules.six as six
18
19 ###########
20 # Python3 #
21 ###########
22
23
24 def lambda_tuple_converter(func):
25 """
26 Converts a Python 2 function as
27 lambda (x,y): x + y
28 In the Python 3 format:
29 lambda x,y : x + y
30 """
31 if func is not None and func.__code__.co_argcount == 1:
32 return lambda *args: func(args[0] if len(args) == 1 else args)
33 else:
34 return func
35
36
37 if six.PY2:
38 bytes_encode = plain_str = str
39 chb = lambda x: x if isinstance(x, str) else chr(x)
40 orb = ord
41
42 def raw(x):
43 """Builds a packet and returns its bytes representation.
44 This function is and always be cross-version compatible"""
45 if hasattr(x, "__bytes__"):
46 return x.__bytes__()
47 return bytes(x)
48 else:
49 def raw(x):
50 """Builds a packet and returns its bytes representation.
51 This function is and always be cross-version compatible"""
52 return bytes(x)
53
54 def bytes_encode(x):
55 """Ensure that the given object is bytes.
56 If the parameter is a packet, raw() should be preferred.
57 """
58 if isinstance(x, str):
59 return x.encode()
60 return bytes(x)
61
62 def plain_str(x):
63 """Convert basic byte objects to str"""
64 if isinstance(x, bytes):
65 return x.decode(errors="ignore")
66 return str(x)
67
68 def chb(x):
69 """Same than chr() but encode as bytes."""
70 return struct.pack("!B", x)
71
72 def orb(x):
73 """Return ord(x) when not already an int."""
74 if isinstance(x, int):
75 return x
76 return ord(x)
77
78
79 def bytes_hex(x):
80 """Hexify a str or a bytes object"""
81 return binascii.b2a_hex(bytes_encode(x))
82
83
84 def hex_bytes(x):
85 """De-hexify a str or a byte object"""
86 return binascii.a2b_hex(bytes_encode(x))
87
88
89 def base64_bytes(x):
90 """Turn base64 into bytes"""
91 if six.PY2:
92 return base64.decodestring(x)
93 return base64.decodebytes(bytes_encode(x))
94
95
96 def bytes_base64(x):
97 """Turn bytes into base64"""
98 if six.PY2:
99 return base64.encodestring(x).replace('\n', '')
100 return base64.encodebytes(bytes_encode(x)).replace(b'\n', b'')
101
102
103 if six.PY2:
104 from StringIO import StringIO
105
106 def gzip_decompress(x):
107 """Decompress using gzip"""
108 with gzip.GzipFile(fileobj=StringIO(x), mode='rb') as fdesc:
109 return fdesc.read()
110
111 def gzip_compress(x):
112 """Compress using gzip"""
113 buf = StringIO()
114 with gzip.GzipFile(fileobj=buf, mode='wb') as fdesc:
115 fdesc.write(x)
116 return buf.getvalue()
117 else:
118 gzip_decompress = gzip.decompress
119 gzip_compress = gzip.compress
120
121 # Typing compatibility
122
123 try:
124 # Only required if using mypy-lang for static typing
125 from typing import Optional, List, Union, Callable, Any, AnyStr, Tuple, \
126 Sized, Dict, Pattern, cast
127 except ImportError:
128 # Let's make some fake ones.
129
130 def cast(_type, obj):
131 return obj
132
133 class _FakeType(object):
134 # make the objects subscriptable indefinetly
135 def __getitem__(self, item):
136 return _FakeType()
137
138 Optional = _FakeType()
139 Union = _FakeType()
140 Callable = _FakeType()
141 List = _FakeType()
142 Dict = _FakeType()
143 Any = _FakeType()
144 AnyStr = _FakeType()
145 Tuple = _FakeType()
146 Pattern = _FakeType()
147
148 class Sized(object):
149 pass
150
[end of scapy/compat.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scapy/compat.py b/scapy/compat.py
--- a/scapy/compat.py
+++ b/scapy/compat.py
@@ -59,11 +59,19 @@
return x.encode()
return bytes(x)
- def plain_str(x):
- """Convert basic byte objects to str"""
- if isinstance(x, bytes):
- return x.decode(errors="ignore")
- return str(x)
+ if six.PY34:
+ def plain_str(x):
+ """Convert basic byte objects to str"""
+ if isinstance(x, bytes):
+ return x.decode(errors="ignore")
+ return str(x)
+ else:
+ # Python 3.5+
+ def plain_str(x):
+ """Convert basic byte objects to str"""
+ if isinstance(x, bytes):
+ return x.decode(errors="backslashreplace")
+ return str(x)
def chb(x):
"""Same than chr() but encode as bytes."""
|
{"golden_diff": "diff --git a/scapy/compat.py b/scapy/compat.py\n--- a/scapy/compat.py\n+++ b/scapy/compat.py\n@@ -59,11 +59,19 @@\n return x.encode()\n return bytes(x)\n \n- def plain_str(x):\n- \"\"\"Convert basic byte objects to str\"\"\"\n- if isinstance(x, bytes):\n- return x.decode(errors=\"ignore\")\n- return str(x)\n+ if six.PY34:\n+ def plain_str(x):\n+ \"\"\"Convert basic byte objects to str\"\"\"\n+ if isinstance(x, bytes):\n+ return x.decode(errors=\"ignore\")\n+ return str(x)\n+ else:\n+ # Python 3.5+\n+ def plain_str(x):\n+ \"\"\"Convert basic byte objects to str\"\"\"\n+ if isinstance(x, bytes):\n+ return x.decode(errors=\"backslashreplace\")\n+ return str(x)\n \n def chb(x):\n \"\"\"Same than chr() but encode as bytes.\"\"\"\n", "issue": "plain_str not consistent between Py2 and Py3\n**Python 2 (expected result)**\r\n```python\r\n>>> plain_str(b'\\x3e\\xb4\\xef\\x2c\\x11')\r\n'>\\xb4\\xef,\\x11'\r\n```\r\n\r\n**Python 3 (unexpected result)**\r\n```python\r\n>>> plain_str(b'\\x3e\\xb4\\xef\\x2c\\x11') \r\n'>,\\x11'\r\n```\r\n\r\n**Cause** is `errors=\"ignore\"`\r\n```python\r\ndef plain_str(x): \r\n \"\"\"Convert basic byte objects to str\"\"\"\r\n if isinstance(x, bytes): \r\n return x.decode(errors=\"ignore\") \r\n return str(x) \r\n```\r\n\r\nIt just ignores values that are not in a valid range for the codec.\r\n\r\n**Possible fix**\r\n```python\r\ndef plain_str(x): \r\n \"\"\"Convert basic byte objects to str\"\"\"\r\n if isinstance(x, bytes): \r\n return x.decode(errors=\"backslashreplace\") \r\n return str(x) \r\n``` \r\nBut this breaks compatibility for Python 3.4\r\n\r\n\n", "before_files": [{"content": "# This file is part of Scapy\n# See http://www.secdev.org/projects/scapy for more information\n# Copyright (C) Philippe Biondi <[email protected]>\n# Copyright (C) Gabriel Potter <[email protected]>\n# This program is published under a GPLv2 license\n\n\"\"\"\nPython 2 and 3 link classes.\n\"\"\"\n\nfrom __future__ import absolute_import\nimport base64\nimport binascii\nimport gzip\nimport struct\n\nimport scapy.modules.six as six\n\n###########\n# Python3 #\n###########\n\n\ndef lambda_tuple_converter(func):\n \"\"\"\n Converts a Python 2 function as\n lambda (x,y): x + y\n In the Python 3 format:\n lambda x,y : x + y\n \"\"\"\n if func is not None and func.__code__.co_argcount == 1:\n return lambda *args: func(args[0] if len(args) == 1 else args)\n else:\n return func\n\n\nif six.PY2:\n bytes_encode = plain_str = str\n chb = lambda x: x if isinstance(x, str) else chr(x)\n orb = ord\n\n def raw(x):\n \"\"\"Builds a packet and returns its bytes representation.\n This function is and always be cross-version compatible\"\"\"\n if hasattr(x, \"__bytes__\"):\n return x.__bytes__()\n return bytes(x)\nelse:\n def raw(x):\n \"\"\"Builds a packet and returns its bytes representation.\n This function is and always be cross-version compatible\"\"\"\n return bytes(x)\n\n def bytes_encode(x):\n \"\"\"Ensure that the given object is bytes.\n If the parameter is a packet, raw() should be preferred.\n \"\"\"\n if isinstance(x, str):\n return x.encode()\n return bytes(x)\n\n def plain_str(x):\n \"\"\"Convert basic byte objects to str\"\"\"\n if isinstance(x, bytes):\n return x.decode(errors=\"ignore\")\n return str(x)\n\n def chb(x):\n \"\"\"Same than chr() but encode as bytes.\"\"\"\n return struct.pack(\"!B\", x)\n\n def orb(x):\n \"\"\"Return ord(x) when not already an int.\"\"\"\n if isinstance(x, int):\n return x\n return ord(x)\n\n\ndef bytes_hex(x):\n \"\"\"Hexify a str or a 
bytes object\"\"\"\n return binascii.b2a_hex(bytes_encode(x))\n\n\ndef hex_bytes(x):\n \"\"\"De-hexify a str or a byte object\"\"\"\n return binascii.a2b_hex(bytes_encode(x))\n\n\ndef base64_bytes(x):\n \"\"\"Turn base64 into bytes\"\"\"\n if six.PY2:\n return base64.decodestring(x)\n return base64.decodebytes(bytes_encode(x))\n\n\ndef bytes_base64(x):\n \"\"\"Turn bytes into base64\"\"\"\n if six.PY2:\n return base64.encodestring(x).replace('\\n', '')\n return base64.encodebytes(bytes_encode(x)).replace(b'\\n', b'')\n\n\nif six.PY2:\n from StringIO import StringIO\n\n def gzip_decompress(x):\n \"\"\"Decompress using gzip\"\"\"\n with gzip.GzipFile(fileobj=StringIO(x), mode='rb') as fdesc:\n return fdesc.read()\n\n def gzip_compress(x):\n \"\"\"Compress using gzip\"\"\"\n buf = StringIO()\n with gzip.GzipFile(fileobj=buf, mode='wb') as fdesc:\n fdesc.write(x)\n return buf.getvalue()\nelse:\n gzip_decompress = gzip.decompress\n gzip_compress = gzip.compress\n\n# Typing compatibility\n\ntry:\n # Only required if using mypy-lang for static typing\n from typing import Optional, List, Union, Callable, Any, AnyStr, Tuple, \\\n Sized, Dict, Pattern, cast\nexcept ImportError:\n # Let's make some fake ones.\n\n def cast(_type, obj):\n return obj\n\n class _FakeType(object):\n # make the objects subscriptable indefinetly\n def __getitem__(self, item):\n return _FakeType()\n\n Optional = _FakeType()\n Union = _FakeType()\n Callable = _FakeType()\n List = _FakeType()\n Dict = _FakeType()\n Any = _FakeType()\n AnyStr = _FakeType()\n Tuple = _FakeType()\n Pattern = _FakeType()\n\n class Sized(object):\n pass\n", "path": "scapy/compat.py"}]}
| 2,068 | 219 |
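The difference between the two decode policies in the patch above can be checked directly; this is just the issue's byte string run through both error handlers. Note that the `'backslashreplace'` handler only became usable for *decoding* in Python 3.5, which is why the patch keeps the old `'ignore'` behaviour on the `six.PY34` branch:

```python
data = b'\x3e\xb4\xef\x2c\x11'

print(repr(data.decode(errors='ignore')))            # '>,\x11'            -- undecodable bytes silently dropped
print(repr(data.decode(errors='backslashreplace')))  # '>\\xb4\\xef,\x11'  -- undecodable bytes kept as escapes
```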
gh_patches_debug_36045
|
rasdani/github-patches
|
git_diff
|
pydantic__pydantic-3176
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
underscore_attrs_are_private and validate_all not compatible with validate_arguments
Output of `python -c "import pydantic.utils; print(pydantic.utils.version_info())"`:
```
pydantic version: 1.8.2
pydantic compiled: True
install path: /home/xxx/.conda/envs/py38/lib/python3.8/site-packages/pydantic
python version: 3.8.5 (default, Sep 4 2020, 07:30:14) [GCC 7.3.0]
platform: Linux-4.18.0-80.el8.x86_64-x86_64-with-glibc2.10
optional deps. installed: ['typing-extensions']
```
`underscore_attrs_are_private` not work with `validate_arguments` just like it not work with `create_model` #3134 .
When I define a function decorated by `validate_arguments` with `validate_all` been set `True`, It will raise `ValidationError`:
```py
from datetime import datetime
from pydantic import validate_arguments, Field
@validate_arguments(config={'validate_all': True})
def foo(dt: datetime = Field(default_factory=lambda : 42)):
print(type(dt))
foo()
```
```
---------------------------------------------------------------------------
ValidationError Traceback (most recent call last)
<ipython-input-4-6c97ede6fd47> in <module>
7 print(type(dt))
8
----> 9 foo()
~/.conda/envs/py38/lib/python3.8/site-packages/pydantic/decorator.cpython-38-x86_64-linux-gnu.so in pydantic.decorator.validate_arguments.validate.wrapper_function()
~/.conda/envs/py38/lib/python3.8/site-packages/pydantic/decorator.cpython-38-x86_64-linux-gnu.so in pydantic.decorator.ValidatedFunction.call()
~/.conda/envs/py38/lib/python3.8/site-packages/pydantic/decorator.cpython-38-x86_64-linux-gnu.so in pydantic.decorator.ValidatedFunction.init_model_instance()
~/.conda/envs/py38/lib/python3.8/site-packages/pydantic/main.cpython-38-x86_64-linux-gnu.so in pydantic.main.BaseModel.__init__()
ValidationError: 3 validation errors for Foo
v__duplicate_kwargs
object of type 'NoneType' has no len() (type=type_error)
args
object of type 'NoneType' has no len() (type=type_error)
kwargs
object of type 'NoneType' has no len() (type=type_error)
```
</issue>
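For contrast with the reproduction above: the same function passes when `validate_all` is left at its default, because pydantic v1 then does not validate defaults at all — neither the user's `default_factory` result nor the `None` defaults of the decorator's internal bookkeeping fields (`args`, `kwargs`, `v__duplicate_kwargs`) that appear in the traceback. A minimal sketch of that contrast, for illustration only:

```python
from datetime import datetime
from pydantic import Field, validate_arguments


@validate_arguments  # same signature as above, but without validate_all in the config
def foo(dt: datetime = Field(default_factory=lambda: 42)):
    print(type(dt))


foo()  # prints <class 'int'>: defaults are not validated, so the internal None defaults are untouched too
```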
<code>
[start of pydantic/decorator.py]
1 from functools import wraps
2 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional, Tuple, Type, TypeVar, Union, overload
3
4 from . import validator
5 from .config import Extra
6 from .errors import ConfigError
7 from .main import BaseModel, create_model
8 from .typing import get_all_type_hints
9 from .utils import to_camel
10
11 __all__ = ('validate_arguments',)
12
13 if TYPE_CHECKING:
14 from .typing import AnyCallable
15
16 AnyCallableT = TypeVar('AnyCallableT', bound=AnyCallable)
17 ConfigType = Union[None, Type[Any], Dict[str, Any]]
18
19
20 @overload
21 def validate_arguments(func: None = None, *, config: 'ConfigType' = None) -> Callable[['AnyCallableT'], 'AnyCallableT']:
22 ...
23
24
25 @overload
26 def validate_arguments(func: 'AnyCallableT') -> 'AnyCallableT':
27 ...
28
29
30 def validate_arguments(func: Optional['AnyCallableT'] = None, *, config: 'ConfigType' = None) -> Any:
31 """
32 Decorator to validate the arguments passed to a function.
33 """
34
35 def validate(_func: 'AnyCallable') -> 'AnyCallable':
36 vd = ValidatedFunction(_func, config)
37
38 @wraps(_func)
39 def wrapper_function(*args: Any, **kwargs: Any) -> Any:
40 return vd.call(*args, **kwargs)
41
42 wrapper_function.vd = vd # type: ignore
43 wrapper_function.validate = vd.init_model_instance # type: ignore
44 wrapper_function.raw_function = vd.raw_function # type: ignore
45 wrapper_function.model = vd.model # type: ignore
46 return wrapper_function
47
48 if func:
49 return validate(func)
50 else:
51 return validate
52
53
54 ALT_V_ARGS = 'v__args'
55 ALT_V_KWARGS = 'v__kwargs'
56 V_POSITIONAL_ONLY_NAME = 'v__positional_only'
57 V_DUPLICATE_KWARGS = 'v__duplicate_kwargs'
58
59
60 class ValidatedFunction:
61 def __init__(self, function: 'AnyCallableT', config: 'ConfigType'): # noqa C901
62 from inspect import Parameter, signature
63
64 parameters: Mapping[str, Parameter] = signature(function).parameters
65
66 if parameters.keys() & {ALT_V_ARGS, ALT_V_KWARGS, V_POSITIONAL_ONLY_NAME, V_DUPLICATE_KWARGS}:
67 raise ConfigError(
68 f'"{ALT_V_ARGS}", "{ALT_V_KWARGS}", "{V_POSITIONAL_ONLY_NAME}" and "{V_DUPLICATE_KWARGS}" '
69 f'are not permitted as argument names when using the "{validate_arguments.__name__}" decorator'
70 )
71
72 self.raw_function = function
73 self.arg_mapping: Dict[int, str] = {}
74 self.positional_only_args = set()
75 self.v_args_name = 'args'
76 self.v_kwargs_name = 'kwargs'
77
78 type_hints = get_all_type_hints(function)
79 takes_args = False
80 takes_kwargs = False
81 fields: Dict[str, Tuple[Any, Any]] = {}
82 for i, (name, p) in enumerate(parameters.items()):
83 if p.annotation is p.empty:
84 annotation = Any
85 else:
86 annotation = type_hints[name]
87
88 default = ... if p.default is p.empty else p.default
89 if p.kind == Parameter.POSITIONAL_ONLY:
90 self.arg_mapping[i] = name
91 fields[name] = annotation, default
92 fields[V_POSITIONAL_ONLY_NAME] = List[str], None
93 self.positional_only_args.add(name)
94 elif p.kind == Parameter.POSITIONAL_OR_KEYWORD:
95 self.arg_mapping[i] = name
96 fields[name] = annotation, default
97 fields[V_DUPLICATE_KWARGS] = List[str], None
98 elif p.kind == Parameter.KEYWORD_ONLY:
99 fields[name] = annotation, default
100 elif p.kind == Parameter.VAR_POSITIONAL:
101 self.v_args_name = name
102 fields[name] = Tuple[annotation, ...], None
103 takes_args = True
104 else:
105 assert p.kind == Parameter.VAR_KEYWORD, p.kind
106 self.v_kwargs_name = name
107 fields[name] = Dict[str, annotation], None # type: ignore
108 takes_kwargs = True
109
110 # these checks avoid a clash between "args" and a field with that name
111 if not takes_args and self.v_args_name in fields:
112 self.v_args_name = ALT_V_ARGS
113
114 # same with "kwargs"
115 if not takes_kwargs and self.v_kwargs_name in fields:
116 self.v_kwargs_name = ALT_V_KWARGS
117
118 if not takes_args:
119 # we add the field so validation below can raise the correct exception
120 fields[self.v_args_name] = List[Any], None
121
122 if not takes_kwargs:
123 # same with kwargs
124 fields[self.v_kwargs_name] = Dict[Any, Any], None
125
126 self.create_model(fields, takes_args, takes_kwargs, config)
127
128 def init_model_instance(self, *args: Any, **kwargs: Any) -> BaseModel:
129 values = self.build_values(args, kwargs)
130 return self.model(**values)
131
132 def call(self, *args: Any, **kwargs: Any) -> Any:
133 m = self.init_model_instance(*args, **kwargs)
134 return self.execute(m)
135
136 def build_values(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Dict[str, Any]:
137 values: Dict[str, Any] = {}
138 if args:
139 arg_iter = enumerate(args)
140 while True:
141 try:
142 i, a = next(arg_iter)
143 except StopIteration:
144 break
145 arg_name = self.arg_mapping.get(i)
146 if arg_name is not None:
147 values[arg_name] = a
148 else:
149 values[self.v_args_name] = [a] + [a for _, a in arg_iter]
150 break
151
152 var_kwargs = {}
153 wrong_positional_args = []
154 duplicate_kwargs = []
155 non_var_fields = set(self.model.__fields__) - {self.v_args_name, self.v_kwargs_name}
156 for k, v in kwargs.items():
157 if k in non_var_fields:
158 if k in self.positional_only_args:
159 wrong_positional_args.append(k)
160 if k in values:
161 duplicate_kwargs.append(k)
162 values[k] = v
163 else:
164 var_kwargs[k] = v
165
166 if var_kwargs:
167 values[self.v_kwargs_name] = var_kwargs
168 if wrong_positional_args:
169 values[V_POSITIONAL_ONLY_NAME] = wrong_positional_args
170 if duplicate_kwargs:
171 values[V_DUPLICATE_KWARGS] = duplicate_kwargs
172 return values
173
174 def execute(self, m: BaseModel) -> Any:
175 d = {k: v for k, v in m._iter() if k in m.__fields_set__ or m.__fields__[k].default_factory}
176 var_kwargs = d.pop(self.v_kwargs_name, {})
177
178 if self.v_args_name in d:
179 args_: List[Any] = []
180 in_kwargs = False
181 kwargs = {}
182 for name, value in d.items():
183 if in_kwargs:
184 kwargs[name] = value
185 elif name == self.v_args_name:
186 args_ += value
187 in_kwargs = True
188 else:
189 args_.append(value)
190 return self.raw_function(*args_, **kwargs, **var_kwargs)
191 elif self.positional_only_args:
192 args_ = []
193 kwargs = {}
194 for name, value in d.items():
195 if name in self.positional_only_args:
196 args_.append(value)
197 else:
198 kwargs[name] = value
199 return self.raw_function(*args_, **kwargs, **var_kwargs)
200 else:
201 return self.raw_function(**d, **var_kwargs)
202
203 def create_model(self, fields: Dict[str, Any], takes_args: bool, takes_kwargs: bool, config: 'ConfigType') -> None:
204 pos_args = len(self.arg_mapping)
205
206 class CustomConfig:
207 pass
208
209 if not TYPE_CHECKING: # pragma: no branch
210 if isinstance(config, dict):
211 CustomConfig = type('Config', (), config) # noqa: F811
212 elif config is not None:
213 CustomConfig = config # noqa: F811
214
215 if hasattr(CustomConfig, 'fields') or hasattr(CustomConfig, 'alias_generator'):
216 raise ConfigError(
217 'Setting the "fields" and "alias_generator" property on custom Config for '
218 '@validate_arguments is not yet supported, please remove.'
219 )
220
221 class DecoratorBaseModel(BaseModel):
222 @validator(self.v_args_name, check_fields=False, allow_reuse=True)
223 def check_args(cls, v: List[Any]) -> List[Any]:
224 if takes_args:
225 return v
226
227 raise TypeError(f'{pos_args} positional arguments expected but {pos_args + len(v)} given')
228
229 @validator(self.v_kwargs_name, check_fields=False, allow_reuse=True)
230 def check_kwargs(cls, v: Dict[str, Any]) -> Dict[str, Any]:
231 if takes_kwargs:
232 return v
233
234 plural = '' if len(v) == 1 else 's'
235 keys = ', '.join(map(repr, v.keys()))
236 raise TypeError(f'unexpected keyword argument{plural}: {keys}')
237
238 @validator(V_POSITIONAL_ONLY_NAME, check_fields=False, allow_reuse=True)
239 def check_positional_only(cls, v: List[str]) -> None:
240 plural = '' if len(v) == 1 else 's'
241 keys = ', '.join(map(repr, v))
242 raise TypeError(f'positional-only argument{plural} passed as keyword argument{plural}: {keys}')
243
244 @validator(V_DUPLICATE_KWARGS, check_fields=False, allow_reuse=True)
245 def check_duplicate_kwargs(cls, v: List[str]) -> None:
246 plural = '' if len(v) == 1 else 's'
247 keys = ', '.join(map(repr, v))
248 raise TypeError(f'multiple values for argument{plural}: {keys}')
249
250 class Config(CustomConfig):
251 extra = Extra.forbid
252
253 self.model = create_model(to_camel(self.raw_function.__name__), __base__=DecoratorBaseModel, **fields)
254
[end of pydantic/decorator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pydantic/decorator.py b/pydantic/decorator.py
--- a/pydantic/decorator.py
+++ b/pydantic/decorator.py
@@ -220,15 +220,15 @@
class DecoratorBaseModel(BaseModel):
@validator(self.v_args_name, check_fields=False, allow_reuse=True)
- def check_args(cls, v: List[Any]) -> List[Any]:
- if takes_args:
+ def check_args(cls, v: Optional[List[Any]]) -> Optional[List[Any]]:
+ if takes_args or v is None:
return v
raise TypeError(f'{pos_args} positional arguments expected but {pos_args + len(v)} given')
@validator(self.v_kwargs_name, check_fields=False, allow_reuse=True)
- def check_kwargs(cls, v: Dict[str, Any]) -> Dict[str, Any]:
- if takes_kwargs:
+ def check_kwargs(cls, v: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
+ if takes_kwargs or v is None:
return v
plural = '' if len(v) == 1 else 's'
@@ -236,13 +236,19 @@
raise TypeError(f'unexpected keyword argument{plural}: {keys}')
@validator(V_POSITIONAL_ONLY_NAME, check_fields=False, allow_reuse=True)
- def check_positional_only(cls, v: List[str]) -> None:
+ def check_positional_only(cls, v: Optional[List[str]]) -> None:
+ if v is None:
+ return
+
plural = '' if len(v) == 1 else 's'
keys = ', '.join(map(repr, v))
raise TypeError(f'positional-only argument{plural} passed as keyword argument{plural}: {keys}')
@validator(V_DUPLICATE_KWARGS, check_fields=False, allow_reuse=True)
- def check_duplicate_kwargs(cls, v: List[str]) -> None:
+ def check_duplicate_kwargs(cls, v: Optional[List[str]]) -> None:
+ if v is None:
+ return
+
plural = '' if len(v) == 1 else 's'
keys = ', '.join(map(repr, v))
raise TypeError(f'multiple values for argument{plural}: {keys}')
|
{"golden_diff": "diff --git a/pydantic/decorator.py b/pydantic/decorator.py\n--- a/pydantic/decorator.py\n+++ b/pydantic/decorator.py\n@@ -220,15 +220,15 @@\n \n class DecoratorBaseModel(BaseModel):\n @validator(self.v_args_name, check_fields=False, allow_reuse=True)\n- def check_args(cls, v: List[Any]) -> List[Any]:\n- if takes_args:\n+ def check_args(cls, v: Optional[List[Any]]) -> Optional[List[Any]]:\n+ if takes_args or v is None:\n return v\n \n raise TypeError(f'{pos_args} positional arguments expected but {pos_args + len(v)} given')\n \n @validator(self.v_kwargs_name, check_fields=False, allow_reuse=True)\n- def check_kwargs(cls, v: Dict[str, Any]) -> Dict[str, Any]:\n- if takes_kwargs:\n+ def check_kwargs(cls, v: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:\n+ if takes_kwargs or v is None:\n return v\n \n plural = '' if len(v) == 1 else 's'\n@@ -236,13 +236,19 @@\n raise TypeError(f'unexpected keyword argument{plural}: {keys}')\n \n @validator(V_POSITIONAL_ONLY_NAME, check_fields=False, allow_reuse=True)\n- def check_positional_only(cls, v: List[str]) -> None:\n+ def check_positional_only(cls, v: Optional[List[str]]) -> None:\n+ if v is None:\n+ return\n+\n plural = '' if len(v) == 1 else 's'\n keys = ', '.join(map(repr, v))\n raise TypeError(f'positional-only argument{plural} passed as keyword argument{plural}: {keys}')\n \n @validator(V_DUPLICATE_KWARGS, check_fields=False, allow_reuse=True)\n- def check_duplicate_kwargs(cls, v: List[str]) -> None:\n+ def check_duplicate_kwargs(cls, v: Optional[List[str]]) -> None:\n+ if v is None:\n+ return\n+\n plural = '' if len(v) == 1 else 's'\n keys = ', '.join(map(repr, v))\n raise TypeError(f'multiple values for argument{plural}: {keys}')\n", "issue": "underscore_attrs_are_private and validate_all not compatible with validate_arguments\nOutput of `python -c \"import pydantic.utils; print(pydantic.utils.version_info())\"`:\r\n```\r\n pydantic version: 1.8.2\r\n pydantic compiled: True\r\n install path: /home/xxx/.conda/envs/py38/lib/python3.8/site-packages/pydantic\r\n python version: 3.8.5 (default, Sep 4 2020, 07:30:14) [GCC 7.3.0]\r\n platform: Linux-4.18.0-80.el8.x86_64-x86_64-with-glibc2.10\r\n optional deps. 
installed: ['typing-extensions']\r\n```\r\n`underscore_attrs_are_private` not work with `validate_arguments` just like it not work with `create_model` #3134 .\r\n\r\nWhen I define a function decorated by `validate_arguments` with `validate_all` been set `True`, It will raise `ValidationError`:\r\n\r\n```py\r\nfrom datetime import datetime\r\nfrom pydantic import validate_arguments, Field\r\n\r\n\r\n@validate_arguments(config={'validate_all': True})\r\ndef foo(dt: datetime = Field(default_factory=lambda : 42)):\r\n print(type(dt))\r\n\r\nfoo()\r\n```\r\n```\r\n---------------------------------------------------------------------------\r\nValidationError Traceback (most recent call last)\r\n<ipython-input-4-6c97ede6fd47> in <module>\r\n 7 print(type(dt))\r\n 8 \r\n----> 9 foo()\r\n\r\n~/.conda/envs/py38/lib/python3.8/site-packages/pydantic/decorator.cpython-38-x86_64-linux-gnu.so in pydantic.decorator.validate_arguments.validate.wrapper_function()\r\n\r\n~/.conda/envs/py38/lib/python3.8/site-packages/pydantic/decorator.cpython-38-x86_64-linux-gnu.so in pydantic.decorator.ValidatedFunction.call()\r\n\r\n~/.conda/envs/py38/lib/python3.8/site-packages/pydantic/decorator.cpython-38-x86_64-linux-gnu.so in pydantic.decorator.ValidatedFunction.init_model_instance()\r\n\r\n~/.conda/envs/py38/lib/python3.8/site-packages/pydantic/main.cpython-38-x86_64-linux-gnu.so in pydantic.main.BaseModel.__init__()\r\n\r\nValidationError: 3 validation errors for Foo\r\nv__duplicate_kwargs\r\n object of type 'NoneType' has no len() (type=type_error)\r\nargs\r\n object of type 'NoneType' has no len() (type=type_error)\r\nkwargs\r\n object of type 'NoneType' has no len() (type=type_error)\r\n```\r\n\n", "before_files": [{"content": "from functools import wraps\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional, Tuple, Type, TypeVar, Union, overload\n\nfrom . 
import validator\nfrom .config import Extra\nfrom .errors import ConfigError\nfrom .main import BaseModel, create_model\nfrom .typing import get_all_type_hints\nfrom .utils import to_camel\n\n__all__ = ('validate_arguments',)\n\nif TYPE_CHECKING:\n from .typing import AnyCallable\n\n AnyCallableT = TypeVar('AnyCallableT', bound=AnyCallable)\n ConfigType = Union[None, Type[Any], Dict[str, Any]]\n\n\n@overload\ndef validate_arguments(func: None = None, *, config: 'ConfigType' = None) -> Callable[['AnyCallableT'], 'AnyCallableT']:\n ...\n\n\n@overload\ndef validate_arguments(func: 'AnyCallableT') -> 'AnyCallableT':\n ...\n\n\ndef validate_arguments(func: Optional['AnyCallableT'] = None, *, config: 'ConfigType' = None) -> Any:\n \"\"\"\n Decorator to validate the arguments passed to a function.\n \"\"\"\n\n def validate(_func: 'AnyCallable') -> 'AnyCallable':\n vd = ValidatedFunction(_func, config)\n\n @wraps(_func)\n def wrapper_function(*args: Any, **kwargs: Any) -> Any:\n return vd.call(*args, **kwargs)\n\n wrapper_function.vd = vd # type: ignore\n wrapper_function.validate = vd.init_model_instance # type: ignore\n wrapper_function.raw_function = vd.raw_function # type: ignore\n wrapper_function.model = vd.model # type: ignore\n return wrapper_function\n\n if func:\n return validate(func)\n else:\n return validate\n\n\nALT_V_ARGS = 'v__args'\nALT_V_KWARGS = 'v__kwargs'\nV_POSITIONAL_ONLY_NAME = 'v__positional_only'\nV_DUPLICATE_KWARGS = 'v__duplicate_kwargs'\n\n\nclass ValidatedFunction:\n def __init__(self, function: 'AnyCallableT', config: 'ConfigType'): # noqa C901\n from inspect import Parameter, signature\n\n parameters: Mapping[str, Parameter] = signature(function).parameters\n\n if parameters.keys() & {ALT_V_ARGS, ALT_V_KWARGS, V_POSITIONAL_ONLY_NAME, V_DUPLICATE_KWARGS}:\n raise ConfigError(\n f'\"{ALT_V_ARGS}\", \"{ALT_V_KWARGS}\", \"{V_POSITIONAL_ONLY_NAME}\" and \"{V_DUPLICATE_KWARGS}\" '\n f'are not permitted as argument names when using the \"{validate_arguments.__name__}\" decorator'\n )\n\n self.raw_function = function\n self.arg_mapping: Dict[int, str] = {}\n self.positional_only_args = set()\n self.v_args_name = 'args'\n self.v_kwargs_name = 'kwargs'\n\n type_hints = get_all_type_hints(function)\n takes_args = False\n takes_kwargs = False\n fields: Dict[str, Tuple[Any, Any]] = {}\n for i, (name, p) in enumerate(parameters.items()):\n if p.annotation is p.empty:\n annotation = Any\n else:\n annotation = type_hints[name]\n\n default = ... 
if p.default is p.empty else p.default\n if p.kind == Parameter.POSITIONAL_ONLY:\n self.arg_mapping[i] = name\n fields[name] = annotation, default\n fields[V_POSITIONAL_ONLY_NAME] = List[str], None\n self.positional_only_args.add(name)\n elif p.kind == Parameter.POSITIONAL_OR_KEYWORD:\n self.arg_mapping[i] = name\n fields[name] = annotation, default\n fields[V_DUPLICATE_KWARGS] = List[str], None\n elif p.kind == Parameter.KEYWORD_ONLY:\n fields[name] = annotation, default\n elif p.kind == Parameter.VAR_POSITIONAL:\n self.v_args_name = name\n fields[name] = Tuple[annotation, ...], None\n takes_args = True\n else:\n assert p.kind == Parameter.VAR_KEYWORD, p.kind\n self.v_kwargs_name = name\n fields[name] = Dict[str, annotation], None # type: ignore\n takes_kwargs = True\n\n # these checks avoid a clash between \"args\" and a field with that name\n if not takes_args and self.v_args_name in fields:\n self.v_args_name = ALT_V_ARGS\n\n # same with \"kwargs\"\n if not takes_kwargs and self.v_kwargs_name in fields:\n self.v_kwargs_name = ALT_V_KWARGS\n\n if not takes_args:\n # we add the field so validation below can raise the correct exception\n fields[self.v_args_name] = List[Any], None\n\n if not takes_kwargs:\n # same with kwargs\n fields[self.v_kwargs_name] = Dict[Any, Any], None\n\n self.create_model(fields, takes_args, takes_kwargs, config)\n\n def init_model_instance(self, *args: Any, **kwargs: Any) -> BaseModel:\n values = self.build_values(args, kwargs)\n return self.model(**values)\n\n def call(self, *args: Any, **kwargs: Any) -> Any:\n m = self.init_model_instance(*args, **kwargs)\n return self.execute(m)\n\n def build_values(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Dict[str, Any]:\n values: Dict[str, Any] = {}\n if args:\n arg_iter = enumerate(args)\n while True:\n try:\n i, a = next(arg_iter)\n except StopIteration:\n break\n arg_name = self.arg_mapping.get(i)\n if arg_name is not None:\n values[arg_name] = a\n else:\n values[self.v_args_name] = [a] + [a for _, a in arg_iter]\n break\n\n var_kwargs = {}\n wrong_positional_args = []\n duplicate_kwargs = []\n non_var_fields = set(self.model.__fields__) - {self.v_args_name, self.v_kwargs_name}\n for k, v in kwargs.items():\n if k in non_var_fields:\n if k in self.positional_only_args:\n wrong_positional_args.append(k)\n if k in values:\n duplicate_kwargs.append(k)\n values[k] = v\n else:\n var_kwargs[k] = v\n\n if var_kwargs:\n values[self.v_kwargs_name] = var_kwargs\n if wrong_positional_args:\n values[V_POSITIONAL_ONLY_NAME] = wrong_positional_args\n if duplicate_kwargs:\n values[V_DUPLICATE_KWARGS] = duplicate_kwargs\n return values\n\n def execute(self, m: BaseModel) -> Any:\n d = {k: v for k, v in m._iter() if k in m.__fields_set__ or m.__fields__[k].default_factory}\n var_kwargs = d.pop(self.v_kwargs_name, {})\n\n if self.v_args_name in d:\n args_: List[Any] = []\n in_kwargs = False\n kwargs = {}\n for name, value in d.items():\n if in_kwargs:\n kwargs[name] = value\n elif name == self.v_args_name:\n args_ += value\n in_kwargs = True\n else:\n args_.append(value)\n return self.raw_function(*args_, **kwargs, **var_kwargs)\n elif self.positional_only_args:\n args_ = []\n kwargs = {}\n for name, value in d.items():\n if name in self.positional_only_args:\n args_.append(value)\n else:\n kwargs[name] = value\n return self.raw_function(*args_, **kwargs, **var_kwargs)\n else:\n return self.raw_function(**d, **var_kwargs)\n\n def create_model(self, fields: Dict[str, Any], takes_args: bool, takes_kwargs: bool, config: 
'ConfigType') -> None:\n pos_args = len(self.arg_mapping)\n\n class CustomConfig:\n pass\n\n if not TYPE_CHECKING: # pragma: no branch\n if isinstance(config, dict):\n CustomConfig = type('Config', (), config) # noqa: F811\n elif config is not None:\n CustomConfig = config # noqa: F811\n\n if hasattr(CustomConfig, 'fields') or hasattr(CustomConfig, 'alias_generator'):\n raise ConfigError(\n 'Setting the \"fields\" and \"alias_generator\" property on custom Config for '\n '@validate_arguments is not yet supported, please remove.'\n )\n\n class DecoratorBaseModel(BaseModel):\n @validator(self.v_args_name, check_fields=False, allow_reuse=True)\n def check_args(cls, v: List[Any]) -> List[Any]:\n if takes_args:\n return v\n\n raise TypeError(f'{pos_args} positional arguments expected but {pos_args + len(v)} given')\n\n @validator(self.v_kwargs_name, check_fields=False, allow_reuse=True)\n def check_kwargs(cls, v: Dict[str, Any]) -> Dict[str, Any]:\n if takes_kwargs:\n return v\n\n plural = '' if len(v) == 1 else 's'\n keys = ', '.join(map(repr, v.keys()))\n raise TypeError(f'unexpected keyword argument{plural}: {keys}')\n\n @validator(V_POSITIONAL_ONLY_NAME, check_fields=False, allow_reuse=True)\n def check_positional_only(cls, v: List[str]) -> None:\n plural = '' if len(v) == 1 else 's'\n keys = ', '.join(map(repr, v))\n raise TypeError(f'positional-only argument{plural} passed as keyword argument{plural}: {keys}')\n\n @validator(V_DUPLICATE_KWARGS, check_fields=False, allow_reuse=True)\n def check_duplicate_kwargs(cls, v: List[str]) -> None:\n plural = '' if len(v) == 1 else 's'\n keys = ', '.join(map(repr, v))\n raise TypeError(f'multiple values for argument{plural}: {keys}')\n\n class Config(CustomConfig):\n extra = Extra.forbid\n\n self.model = create_model(to_camel(self.raw_function.__name__), __base__=DecoratorBaseModel, **fields)\n", "path": "pydantic/decorator.py"}]}
| 4,008 | 507 |
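The fix in the row above relies on how `Config.validate_all` behaves: it makes pydantic run field validators over default values too, so the synthetic `args`/`kwargs`/`v__duplicate_kwargs` fields (whose defaults are `None`) reach `check_args`/`check_kwargs` and evaluate `len(None)`. Widening the validators to `Optional` and returning early on `None` removes that path. Below is a minimal standalone sketch of the same guard pattern using the pydantic v1 API from the report; the model and field names are invented for illustration and are not part of the repository.

```python
# Illustrative sketch only (pydantic v1 API, hypothetical field names).
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, validator


class ArgsModel(BaseModel):
    args: Optional[List[Any]] = None
    kwargs: Optional[Dict[str, Any]] = None

    class Config:
        validate_all = True  # validators also run over the None defaults

    @validator("args")
    def check_args(cls, v: Optional[List[Any]]) -> Optional[List[Any]]:
        if v is None:  # default value: nothing was passed, nothing to reject
            return v
        raise TypeError(f"0 positional arguments expected but {len(v)} given")

    @validator("kwargs")
    def check_kwargs(cls, v: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
        if v is None:
            return v
        keys = ", ".join(map(repr, v))
        raise TypeError(f"unexpected keyword arguments: {keys}")


ArgsModel()  # succeeds: the None guards keep len(None) from ever being evaluated
```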
gh_patches_debug_25839
|
rasdani/github-patches
|
git_diff
|
python-discord__site-650
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add an endpoint to fetch user information from a username.
We'd need this endpoint to be able to implement features requested in [this](https://github.com/python-discord/bot/issues/1757) bot issue.
```py
from django.shortcuts import get_object_or_404
@action(
detail=False,
methods=["GET"],
url_path=r"username/(?P<name>[^#]{,32})#(?P<discriminator>[0-9]{4})"
)
def get_by_username(self, request, name, discriminator):
user = get_object_or_404(User, name=name, discriminator=discriminator)
return Response(UserSerializer(user).data, status=status.HTTP_200_OK)
```
This addition to the user view set seems to make GET requests to URLs like `bot/users/username/Qwerty#0001` work, but there might be a better way to do this.
</issue>
<code>
[start of pydis_site/apps/api/viewsets/bot/user.py]
1 import typing
2 from collections import OrderedDict
3
4 from django.db.models import Q
5 from rest_framework import status
6 from rest_framework.decorators import action
7 from rest_framework.pagination import PageNumberPagination
8 from rest_framework.request import Request
9 from rest_framework.response import Response
10 from rest_framework.serializers import ModelSerializer
11 from rest_framework.viewsets import ModelViewSet
12
13 from pydis_site.apps.api.models.bot.infraction import Infraction
14 from pydis_site.apps.api.models.bot.metricity import Metricity, NotFoundError
15 from pydis_site.apps.api.models.bot.user import User
16 from pydis_site.apps.api.serializers import UserSerializer
17
18
19 class UserListPagination(PageNumberPagination):
20 """Custom pagination class for the User Model."""
21
22 page_size = 2500
23 page_size_query_param = "page_size"
24
25 def get_next_page_number(self) -> typing.Optional[int]:
26 """Get the next page number."""
27 if not self.page.has_next():
28 return None
29 page_number = self.page.next_page_number()
30 return page_number
31
32 def get_previous_page_number(self) -> typing.Optional[int]:
33 """Get the previous page number."""
34 if not self.page.has_previous():
35 return None
36
37 page_number = self.page.previous_page_number()
38 return page_number
39
40 def get_paginated_response(self, data: list) -> Response:
41 """Override method to send modified response."""
42 return Response(OrderedDict([
43 ('count', self.page.paginator.count),
44 ('next_page_no', self.get_next_page_number()),
45 ('previous_page_no', self.get_previous_page_number()),
46 ('results', data)
47 ]))
48
49
50 class UserViewSet(ModelViewSet):
51 """
52 View providing CRUD operations on Discord users through the bot.
53
54 ## Routes
55 ### GET /bot/users
56 Returns all users currently known with pagination.
57
58 #### Response format
59 >>> {
60 ... 'count': 95000,
61 ... 'next_page_no': "2",
62 ... 'previous_page_no': None,
63 ... 'results': [
64 ... {
65 ... 'id': 409107086526644234,
66 ... 'name': "Python",
67 ... 'discriminator': 4329,
68 ... 'roles': [
69 ... 352427296948486144,
70 ... 270988689419665409,
71 ... 277546923144249364,
72 ... 458226699344019457
73 ... ],
74 ... 'in_guild': True
75 ... },
76 ... ]
77 ... }
78
79 #### Optional Query Parameters
80 - page_size: number of Users in one page, defaults to 10,000
81 - page: page number
82
83 #### Status codes
84 - 200: returned on success
85
86 ### GET /bot/users/<snowflake:int>
87 Gets a single user by ID.
88
89 #### Response format
90 >>> {
91 ... 'id': 409107086526644234,
92 ... 'name': "Python",
93 ... 'discriminator': 4329,
94 ... 'roles': [
95 ... 352427296948486144,
96 ... 270988689419665409,
97 ... 277546923144249364,
98 ... 458226699344019457
99 ... ],
100 ... 'in_guild': True
101 ... }
102
103 #### Status codes
104 - 200: returned on success
105 - 404: if a user with the given `snowflake` could not be found
106
107 ### GET /bot/users/<snowflake:int>/metricity_data
108 Gets metricity data for a single user by ID.
109
110 #### Response format
111 >>> {
112 ... "joined_at": "2020-10-06T21:54:23.540766",
113 ... "total_messages": 2,
114 ... "voice_banned": False,
115 ... "activity_blocks": 1
116 ...}
117
118 #### Status codes
119 - 200: returned on success
120 - 404: if a user with the given `snowflake` could not be found
121
122 ### GET /bot/users/<snowflake:int>/metricity_review_data
123 Gets metricity data for a single user's review by ID.
124
125 #### Response format
126 >>> {
127 ... 'joined_at': '2020-08-26T08:09:43.507000',
128 ... 'top_channel_activity': [['off-topic', 15],
129 ... ['talent-pool', 4],
130 ... ['defcon', 2]],
131 ... 'total_messages': 22
132 ... }
133
134 #### Status codes
135 - 200: returned on success
136 - 404: if a user with the given `snowflake` could not be found
137
138 ### POST /bot/users
139 Adds a single or multiple new users.
140 The roles attached to the user(s) must be roles known by the site.
141 Users that already exist in the database will be skipped.
142
143 #### Request body
144 >>> {
145 ... 'id': int,
146 ... 'name': str,
147 ... 'discriminator': int,
148 ... 'roles': List[int],
149 ... 'in_guild': bool
150 ... }
151
152 Alternatively, request users can be POSTed as a list of above objects,
153 in which case multiple users will be created at once. In this case,
154 the response is an empty list.
155
156 #### Status codes
157 - 201: returned on success
158 - 400: if one of the given roles does not exist, or one of the given fields is invalid
159 - 400: if multiple user objects with the same id are given
160
161 ### PUT /bot/users/<snowflake:int>
162 Update the user with the given `snowflake`.
163 All fields in the request body are required.
164
165 #### Request body
166 >>> {
167 ... 'id': int,
168 ... 'name': str,
169 ... 'discriminator': int,
170 ... 'roles': List[int],
171 ... 'in_guild': bool
172 ... }
173
174 #### Status codes
175 - 200: returned on success
176 - 400: if the request body was invalid, see response body for details
177 - 404: if the user with the given `snowflake` could not be found
178
179 ### PATCH /bot/users/<snowflake:int>
180 Update the user with the given `snowflake`.
181 All fields in the request body are optional.
182
183 #### Request body
184 >>> {
185 ... 'id': int,
186 ... 'name': str,
187 ... 'discriminator': int,
188 ... 'roles': List[int],
189 ... 'in_guild': bool
190 ... }
191
192 #### Status codes
193 - 200: returned on success
194 - 400: if the request body was invalid, see response body for details
195 - 404: if the user with the given `snowflake` could not be found
196
197 ### BULK PATCH /bot/users/bulk_patch
198 Update users with the given `ids` and `details`.
199 `id` field and at least one other field is mandatory.
200
201 #### Request body
202 >>> [
203 ... {
204 ... 'id': int,
205 ... 'name': str,
206 ... 'discriminator': int,
207 ... 'roles': List[int],
208 ... 'in_guild': bool
209 ... },
210 ... {
211 ... 'id': int,
212 ... 'name': str,
213 ... 'discriminator': int,
214 ... 'roles': List[int],
215 ... 'in_guild': bool
216 ... },
217 ... ]
218
219 #### Status codes
220 - 200: returned on success
221 - 400: if the request body was invalid, see response body for details
222 - 400: if multiple user objects with the same id are given
223 - 404: if the user with the given id does not exist
224
225 ### DELETE /bot/users/<snowflake:int>
226 Deletes the user with the given `snowflake`.
227
228 #### Status codes
229 - 204: returned on success
230 - 404: if a user with the given `snowflake` does not exist
231 """
232
233 serializer_class = UserSerializer
234 queryset = User.objects.all().order_by("id")
235 pagination_class = UserListPagination
236
237 def get_serializer(self, *args, **kwargs) -> ModelSerializer:
238 """Set Serializer many attribute to True if request body contains a list."""
239 if isinstance(kwargs.get('data', {}), list):
240 kwargs['many'] = True
241
242 return super().get_serializer(*args, **kwargs)
243
244 @action(detail=False, methods=["PATCH"], name='user-bulk-patch')
245 def bulk_patch(self, request: Request) -> Response:
246 """Update multiple User objects in a single request."""
247 serializer = self.get_serializer(
248 instance=self.get_queryset(),
249 data=request.data,
250 many=True,
251 partial=True
252 )
253
254 serializer.is_valid(raise_exception=True)
255 serializer.save()
256
257 return Response(serializer.data, status=status.HTTP_200_OK)
258
259 @action(detail=True)
260 def metricity_data(self, request: Request, pk: str = None) -> Response:
261 """Request handler for metricity_data endpoint."""
262 user = self.get_object()
263
264 has_voice_infraction = Infraction.objects.filter(
265 Q(user__id=user.id, active=True),
266 Q(type="voice_ban") | Q(type="voice_mute")
267 ).exists()
268
269 with Metricity() as metricity:
270 try:
271 data = metricity.user(user.id)
272
273 data["total_messages"] = metricity.total_messages(user.id)
274 data["activity_blocks"] = metricity.total_message_blocks(user.id)
275
276 data["voice_gate_blocked"] = has_voice_infraction
277 return Response(data, status=status.HTTP_200_OK)
278 except NotFoundError:
279 return Response(dict(detail="User not found in metricity"),
280 status=status.HTTP_404_NOT_FOUND)
281
282 @action(detail=True)
283 def metricity_review_data(self, request: Request, pk: str = None) -> Response:
284 """Request handler for metricity_review_data endpoint."""
285 user = self.get_object()
286
287 with Metricity() as metricity:
288 try:
289 data = metricity.user(user.id)
290 data["total_messages"] = metricity.total_messages(user.id)
291 data["top_channel_activity"] = metricity.top_channel_activity(user.id)
292 return Response(data, status=status.HTTP_200_OK)
293 except NotFoundError:
294 return Response(dict(detail="User not found in metricity"),
295 status=status.HTTP_404_NOT_FOUND)
296
[end of pydis_site/apps/api/viewsets/bot/user.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pydis_site/apps/api/viewsets/bot/user.py b/pydis_site/apps/api/viewsets/bot/user.py
--- a/pydis_site/apps/api/viewsets/bot/user.py
+++ b/pydis_site/apps/api/viewsets/bot/user.py
@@ -2,6 +2,7 @@
from collections import OrderedDict
from django.db.models import Q
+from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.pagination import PageNumberPagination
@@ -77,6 +78,8 @@
... }
#### Optional Query Parameters
+ - username: username to search for
+ - discriminator: discriminator to search for
- page_size: number of Users in one page, defaults to 10,000
- page: page number
@@ -233,6 +236,8 @@
serializer_class = UserSerializer
queryset = User.objects.all().order_by("id")
pagination_class = UserListPagination
+ filter_backends = (DjangoFilterBackend,)
+ filter_fields = ('name', 'discriminator')
def get_serializer(self, *args, **kwargs) -> ModelSerializer:
"""Set Serializer many attribute to True if request body contains a list."""
|
{"golden_diff": "diff --git a/pydis_site/apps/api/viewsets/bot/user.py b/pydis_site/apps/api/viewsets/bot/user.py\n--- a/pydis_site/apps/api/viewsets/bot/user.py\n+++ b/pydis_site/apps/api/viewsets/bot/user.py\n@@ -2,6 +2,7 @@\n from collections import OrderedDict\n \n from django.db.models import Q\n+from django_filters.rest_framework import DjangoFilterBackend\n from rest_framework import status\n from rest_framework.decorators import action\n from rest_framework.pagination import PageNumberPagination\n@@ -77,6 +78,8 @@\n ... }\n \n #### Optional Query Parameters\n+ - username: username to search for\n+ - discriminator: discriminator to search for\n - page_size: number of Users in one page, defaults to 10,000\n - page: page number\n \n@@ -233,6 +236,8 @@\n serializer_class = UserSerializer\n queryset = User.objects.all().order_by(\"id\")\n pagination_class = UserListPagination\n+ filter_backends = (DjangoFilterBackend,)\n+ filter_fields = ('name', 'discriminator')\n \n def get_serializer(self, *args, **kwargs) -> ModelSerializer:\n \"\"\"Set Serializer many attribute to True if request body contains a list.\"\"\"\n", "issue": "Add an endpoint to fetch user information from a username.\nWe'd need this endpoint to be able to implement features requested in [this](https://github.com/python-discord/bot/issues/1757) bot issue.\r\n\r\n```py\r\nfrom django.shortcuts import get_object_or_404\r\n\r\n @action(\r\n detail=False,\r\n methods=[\"GET\"],\r\n url_path=r\"username/(?P<name>[^#]{,32})#(?P<discriminator>[0-9]{4})\"\r\n )\r\n def get_by_username(self, request, name, discriminator):\r\n user = get_object_or_404(User, name=name, discriminator=discriminator)\r\n return Response(UserSerializer(user).data, status=status.HTTP_200_OK)\r\n```\r\n\r\nThis addition to the user view set seems to make GET requests to URLs like `bot/users/username/Qwerty#0001` work , but there might be a more ideal way to do this.\n", "before_files": [{"content": "import typing\nfrom collections import OrderedDict\n\nfrom django.db.models import Q\nfrom rest_framework import status\nfrom rest_framework.decorators import action\nfrom rest_framework.pagination import PageNumberPagination\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import ModelSerializer\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom pydis_site.apps.api.models.bot.infraction import Infraction\nfrom pydis_site.apps.api.models.bot.metricity import Metricity, NotFoundError\nfrom pydis_site.apps.api.models.bot.user import User\nfrom pydis_site.apps.api.serializers import UserSerializer\n\n\nclass UserListPagination(PageNumberPagination):\n \"\"\"Custom pagination class for the User Model.\"\"\"\n\n page_size = 2500\n page_size_query_param = \"page_size\"\n\n def get_next_page_number(self) -> typing.Optional[int]:\n \"\"\"Get the next page number.\"\"\"\n if not self.page.has_next():\n return None\n page_number = self.page.next_page_number()\n return page_number\n\n def get_previous_page_number(self) -> typing.Optional[int]:\n \"\"\"Get the previous page number.\"\"\"\n if not self.page.has_previous():\n return None\n\n page_number = self.page.previous_page_number()\n return page_number\n\n def get_paginated_response(self, data: list) -> Response:\n \"\"\"Override method to send modified response.\"\"\"\n return Response(OrderedDict([\n ('count', self.page.paginator.count),\n ('next_page_no', self.get_next_page_number()),\n ('previous_page_no', 
self.get_previous_page_number()),\n ('results', data)\n ]))\n\n\nclass UserViewSet(ModelViewSet):\n \"\"\"\n View providing CRUD operations on Discord users through the bot.\n\n ## Routes\n ### GET /bot/users\n Returns all users currently known with pagination.\n\n #### Response format\n >>> {\n ... 'count': 95000,\n ... 'next_page_no': \"2\",\n ... 'previous_page_no': None,\n ... 'results': [\n ... {\n ... 'id': 409107086526644234,\n ... 'name': \"Python\",\n ... 'discriminator': 4329,\n ... 'roles': [\n ... 352427296948486144,\n ... 270988689419665409,\n ... 277546923144249364,\n ... 458226699344019457\n ... ],\n ... 'in_guild': True\n ... },\n ... ]\n ... }\n\n #### Optional Query Parameters\n - page_size: number of Users in one page, defaults to 10,000\n - page: page number\n\n #### Status codes\n - 200: returned on success\n\n ### GET /bot/users/<snowflake:int>\n Gets a single user by ID.\n\n #### Response format\n >>> {\n ... 'id': 409107086526644234,\n ... 'name': \"Python\",\n ... 'discriminator': 4329,\n ... 'roles': [\n ... 352427296948486144,\n ... 270988689419665409,\n ... 277546923144249364,\n ... 458226699344019457\n ... ],\n ... 'in_guild': True\n ... }\n\n #### Status codes\n - 200: returned on success\n - 404: if a user with the given `snowflake` could not be found\n\n ### GET /bot/users/<snowflake:int>/metricity_data\n Gets metricity data for a single user by ID.\n\n #### Response format\n >>> {\n ... \"joined_at\": \"2020-10-06T21:54:23.540766\",\n ... \"total_messages\": 2,\n ... \"voice_banned\": False,\n ... \"activity_blocks\": 1\n ...}\n\n #### Status codes\n - 200: returned on success\n - 404: if a user with the given `snowflake` could not be found\n\n ### GET /bot/users/<snowflake:int>/metricity_review_data\n Gets metricity data for a single user's review by ID.\n\n #### Response format\n >>> {\n ... 'joined_at': '2020-08-26T08:09:43.507000',\n ... 'top_channel_activity': [['off-topic', 15],\n ... ['talent-pool', 4],\n ... ['defcon', 2]],\n ... 'total_messages': 22\n ... }\n\n #### Status codes\n - 200: returned on success\n - 404: if a user with the given `snowflake` could not be found\n\n ### POST /bot/users\n Adds a single or multiple new users.\n The roles attached to the user(s) must be roles known by the site.\n Users that already exist in the database will be skipped.\n\n #### Request body\n >>> {\n ... 'id': int,\n ... 'name': str,\n ... 'discriminator': int,\n ... 'roles': List[int],\n ... 'in_guild': bool\n ... }\n\n Alternatively, request users can be POSTed as a list of above objects,\n in which case multiple users will be created at once. In this case,\n the response is an empty list.\n\n #### Status codes\n - 201: returned on success\n - 400: if one of the given roles does not exist, or one of the given fields is invalid\n - 400: if multiple user objects with the same id are given\n\n ### PUT /bot/users/<snowflake:int>\n Update the user with the given `snowflake`.\n All fields in the request body are required.\n\n #### Request body\n >>> {\n ... 'id': int,\n ... 'name': str,\n ... 'discriminator': int,\n ... 'roles': List[int],\n ... 'in_guild': bool\n ... }\n\n #### Status codes\n - 200: returned on success\n - 400: if the request body was invalid, see response body for details\n - 404: if the user with the given `snowflake` could not be found\n\n ### PATCH /bot/users/<snowflake:int>\n Update the user with the given `snowflake`.\n All fields in the request body are optional.\n\n #### Request body\n >>> {\n ... 'id': int,\n ... 'name': str,\n ... 
'discriminator': int,\n ... 'roles': List[int],\n ... 'in_guild': bool\n ... }\n\n #### Status codes\n - 200: returned on success\n - 400: if the request body was invalid, see response body for details\n - 404: if the user with the given `snowflake` could not be found\n\n ### BULK PATCH /bot/users/bulk_patch\n Update users with the given `ids` and `details`.\n `id` field and at least one other field is mandatory.\n\n #### Request body\n >>> [\n ... {\n ... 'id': int,\n ... 'name': str,\n ... 'discriminator': int,\n ... 'roles': List[int],\n ... 'in_guild': bool\n ... },\n ... {\n ... 'id': int,\n ... 'name': str,\n ... 'discriminator': int,\n ... 'roles': List[int],\n ... 'in_guild': bool\n ... },\n ... ]\n\n #### Status codes\n - 200: returned on success\n - 400: if the request body was invalid, see response body for details\n - 400: if multiple user objects with the same id are given\n - 404: if the user with the given id does not exist\n\n ### DELETE /bot/users/<snowflake:int>\n Deletes the user with the given `snowflake`.\n\n #### Status codes\n - 204: returned on success\n - 404: if a user with the given `snowflake` does not exist\n \"\"\"\n\n serializer_class = UserSerializer\n queryset = User.objects.all().order_by(\"id\")\n pagination_class = UserListPagination\n\n def get_serializer(self, *args, **kwargs) -> ModelSerializer:\n \"\"\"Set Serializer many attribute to True if request body contains a list.\"\"\"\n if isinstance(kwargs.get('data', {}), list):\n kwargs['many'] = True\n\n return super().get_serializer(*args, **kwargs)\n\n @action(detail=False, methods=[\"PATCH\"], name='user-bulk-patch')\n def bulk_patch(self, request: Request) -> Response:\n \"\"\"Update multiple User objects in a single request.\"\"\"\n serializer = self.get_serializer(\n instance=self.get_queryset(),\n data=request.data,\n many=True,\n partial=True\n )\n\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n @action(detail=True)\n def metricity_data(self, request: Request, pk: str = None) -> Response:\n \"\"\"Request handler for metricity_data endpoint.\"\"\"\n user = self.get_object()\n\n has_voice_infraction = Infraction.objects.filter(\n Q(user__id=user.id, active=True),\n Q(type=\"voice_ban\") | Q(type=\"voice_mute\")\n ).exists()\n\n with Metricity() as metricity:\n try:\n data = metricity.user(user.id)\n\n data[\"total_messages\"] = metricity.total_messages(user.id)\n data[\"activity_blocks\"] = metricity.total_message_blocks(user.id)\n\n data[\"voice_gate_blocked\"] = has_voice_infraction\n return Response(data, status=status.HTTP_200_OK)\n except NotFoundError:\n return Response(dict(detail=\"User not found in metricity\"),\n status=status.HTTP_404_NOT_FOUND)\n\n @action(detail=True)\n def metricity_review_data(self, request: Request, pk: str = None) -> Response:\n \"\"\"Request handler for metricity_review_data endpoint.\"\"\"\n user = self.get_object()\n\n with Metricity() as metricity:\n try:\n data = metricity.user(user.id)\n data[\"total_messages\"] = metricity.total_messages(user.id)\n data[\"top_channel_activity\"] = metricity.top_channel_activity(user.id)\n return Response(data, status=status.HTTP_200_OK)\n except NotFoundError:\n return Response(dict(detail=\"User not found in metricity\"),\n status=status.HTTP_404_NOT_FOUND)\n", "path": "pydis_site/apps/api/viewsets/bot/user.py"}]}
| 4,058 | 279 |
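The row above replaces a dedicated `username/<name>#<discriminator>` route with query-parameter filtering: once `DjangoFilterBackend` is attached to the viewset, the existing list endpoint accepts `?name=...&discriminator=...`. The sketch below shows that general DRF + django-filter pattern on a hypothetical viewset; the `Member` model, its serializer, and the example URL are assumptions made for illustration, and `django_filters` must be installed and listed in `INSTALLED_APPS`.

```python
# Illustrative sketch only: generic DRF + django-filter query-parameter filtering.
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import viewsets

from myapp.models import Member                  # hypothetical model
from myapp.serializers import MemberSerializer   # hypothetical serializer


class MemberViewSet(viewsets.ModelViewSet):
    queryset = Member.objects.all().order_by("id")
    serializer_class = MemberSerializer
    filter_backends = (DjangoFilterBackend,)
    # Newer django-filter releases spell this `filterset_fields`;
    # older ones (as in the patch above) use `filter_fields`.
    filterset_fields = ("name", "discriminator")


# A client can then filter without a custom route, e.g.:
#   GET /members/?name=Qwerty&discriminator=0001
```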
gh_patches_debug_8552
|
rasdani/github-patches
|
git_diff
|
Gallopsled__pwntools-323
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Importing shellcraft submodules doesn't work
```
$ python
>>> import pwnlib.shellcraft.arm
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ImportError: No module named arm
```
</issue>
<code>
[start of pwnlib/shellcraft/__init__.py]
1 from types import ModuleType
2 import sys, os, re
3 from . import internal
4 from ..context import context
5
6 class module(ModuleType):
7 def __init__(self, name, directory):
8 super(module, self).__init__(name)
9
10 # Insert nice properties
11 self.__dict__.update({
12 '__file__': __file__,
13 '__package__': __package__,
14 '__path__': __path__,
15 })
16
17 # Save the shellcode directory
18 self._dir = directory
19
20 # Find the absolute path of the directory
21 self._absdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates', self._dir)
22
23 # Get the docstring
24 with open(os.path.join(self._absdir, "__doc__")) as fd:
25 self.__doc__ = fd.read()
26
27 # Insert into the module list
28 sys.modules[self.__name__] = self
29
30 def __lazyinit__(self):
31
32 # Create a dictionary of submodules
33 self._submodules = {}
34 self._shellcodes = {}
35 for name in os.listdir(self._absdir):
36 path = os.path.join(self._absdir, name)
37 if os.path.isdir(path):
38 self._submodules[name] = module(self.__name__ + '.' + name, os.path.join(self._dir, name))
39 elif os.path.isfile(path) and name != '__doc__' and name[0] != '.':
40 funcname, _ext = os.path.splitext(name)
41 if not re.match('^[a-zA-Z][a-zA-Z0-9_]*$', funcname):
42 raise ValueError("found illegal filename, %r" % name)
43 self._shellcodes[funcname] = name
44
45 # Put the submodules into toplevel
46 self.__dict__.update(self._submodules)
47
48 # These are exported
49 self.__all__ = sorted(self._shellcodes.keys() + self._submodules.keys())
50
51 # Make sure this is not called again
52 self.__lazyinit__ = None
53
54 def __getattr__(self, key):
55 self.__lazyinit__ and self.__lazyinit__()
56
57 # Maybe the lazyinit added it
58 if key in self.__dict__:
59 return self.__dict__[key]
60
61 # This function lazy-loads the shellcodes
62 if key in self._shellcodes:
63 real = internal.make_function(key, self._shellcodes[key], self._dir)
64 setattr(self, key, real)
65 return real
66
67 for m in self._context_modules():
68 try:
69 return getattr(m, key)
70 except AttributeError:
71 pass
72
73 raise AttributeError("'module' object has no attribute '%s'" % key)
74
75 def __dir__(self):
76 # This function lists the available submodules, available shellcodes
77 # and potentially shellcodes available in submodules that should be
78 # avilable because of the context
79 self.__lazyinit__ and self.__lazyinit__()
80
81 result = list(self._submodules.keys())
82 result.extend(('__file__', '__package__', '__path__',
83 '__all__', '__name__'))
84 result.extend(self.__shellcodes__())
85
86 return result
87
88 def _context_modules(self):
89 self.__lazyinit__ and self.__lazyinit__()
90 for k, m in self._submodules.items():
91 if k in [context.arch, context.os]:
92 yield m
93
94 def __shellcodes__(self):
95 self.__lazyinit__ and self.__lazyinit__()
96 result = self._shellcodes.keys()
97 for m in self._context_modules():
98 result.extend(m.__shellcodes__())
99 return result
100
101 # To prevent garbage collection
102 tether = sys.modules[__name__]
103
104 # Create the module structure
105 module(__name__, '')
106
[end of pwnlib/shellcraft/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pwnlib/shellcraft/__init__.py b/pwnlib/shellcraft/__init__.py
--- a/pwnlib/shellcraft/__init__.py
+++ b/pwnlib/shellcraft/__init__.py
@@ -102,4 +102,22 @@
tether = sys.modules[__name__]
# Create the module structure
-module(__name__, '')
+shellcraft = module(__name__, '')
+
+class LazyImporter:
+ def find_module(self, fullname, path):
+ if not fullname.startswith('pwnlib.shellcraft.'):
+ return None
+
+ parts = fullname.split('.')[2:]
+ cur = shellcraft
+ for part in parts:
+ cur = getattr(cur, part, None)
+ if not isinstance(cur, ModuleType):
+ return None
+
+ return self
+
+ def load_module(self, fullname):
+ return sys.modules[fullname]
+sys.meta_path.append(LazyImporter())
|
{"golden_diff": "diff --git a/pwnlib/shellcraft/__init__.py b/pwnlib/shellcraft/__init__.py\n--- a/pwnlib/shellcraft/__init__.py\n+++ b/pwnlib/shellcraft/__init__.py\n@@ -102,4 +102,22 @@\n tether = sys.modules[__name__]\n \n # Create the module structure\n-module(__name__, '')\n+shellcraft = module(__name__, '')\n+\n+class LazyImporter:\n+ def find_module(self, fullname, path):\n+ if not fullname.startswith('pwnlib.shellcraft.'):\n+ return None\n+\n+ parts = fullname.split('.')[2:]\n+ cur = shellcraft\n+ for part in parts:\n+ cur = getattr(cur, part, None)\n+ if not isinstance(cur, ModuleType):\n+ return None\n+\n+ return self\n+\n+ def load_module(self, fullname):\n+ return sys.modules[fullname]\n+sys.meta_path.append(LazyImporter())\n", "issue": "Importing shellcraft submodules doesn't work\n```\n$ python\n>>> import pwnlib.shellcraft.arm\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\nImportError: No module named arm\n```\n\n", "before_files": [{"content": "from types import ModuleType\nimport sys, os, re\nfrom . import internal\nfrom ..context import context\n\nclass module(ModuleType):\n def __init__(self, name, directory):\n super(module, self).__init__(name)\n\n # Insert nice properties\n self.__dict__.update({\n '__file__': __file__,\n '__package__': __package__,\n '__path__': __path__,\n })\n\n # Save the shellcode directory\n self._dir = directory\n\n # Find the absolute path of the directory\n self._absdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates', self._dir)\n\n # Get the docstring\n with open(os.path.join(self._absdir, \"__doc__\")) as fd:\n self.__doc__ = fd.read()\n\n # Insert into the module list\n sys.modules[self.__name__] = self\n\n def __lazyinit__(self):\n\n # Create a dictionary of submodules\n self._submodules = {}\n self._shellcodes = {}\n for name in os.listdir(self._absdir):\n path = os.path.join(self._absdir, name)\n if os.path.isdir(path):\n self._submodules[name] = module(self.__name__ + '.' 
+ name, os.path.join(self._dir, name))\n elif os.path.isfile(path) and name != '__doc__' and name[0] != '.':\n funcname, _ext = os.path.splitext(name)\n if not re.match('^[a-zA-Z][a-zA-Z0-9_]*$', funcname):\n raise ValueError(\"found illegal filename, %r\" % name)\n self._shellcodes[funcname] = name\n\n # Put the submodules into toplevel\n self.__dict__.update(self._submodules)\n\n # These are exported\n self.__all__ = sorted(self._shellcodes.keys() + self._submodules.keys())\n\n # Make sure this is not called again\n self.__lazyinit__ = None\n\n def __getattr__(self, key):\n self.__lazyinit__ and self.__lazyinit__()\n\n # Maybe the lazyinit added it\n if key in self.__dict__:\n return self.__dict__[key]\n\n # This function lazy-loads the shellcodes\n if key in self._shellcodes:\n real = internal.make_function(key, self._shellcodes[key], self._dir)\n setattr(self, key, real)\n return real\n\n for m in self._context_modules():\n try:\n return getattr(m, key)\n except AttributeError:\n pass\n\n raise AttributeError(\"'module' object has no attribute '%s'\" % key)\n\n def __dir__(self):\n # This function lists the available submodules, available shellcodes\n # and potentially shellcodes available in submodules that should be\n # avilable because of the context\n self.__lazyinit__ and self.__lazyinit__()\n\n result = list(self._submodules.keys())\n result.extend(('__file__', '__package__', '__path__',\n '__all__', '__name__'))\n result.extend(self.__shellcodes__())\n\n return result\n\n def _context_modules(self):\n self.__lazyinit__ and self.__lazyinit__()\n for k, m in self._submodules.items():\n if k in [context.arch, context.os]:\n yield m\n\n def __shellcodes__(self):\n self.__lazyinit__ and self.__lazyinit__()\n result = self._shellcodes.keys()\n for m in self._context_modules():\n result.extend(m.__shellcodes__())\n return result\n\n# To prevent garbage collection\ntether = sys.modules[__name__]\n\n# Create the module structure\nmodule(__name__, '')\n", "path": "pwnlib/shellcraft/__init__.py"}]}
| 1,630 | 215 |
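The pwntools row above makes `import pwnlib.shellcraft.arm` work by appending a finder to `sys.meta_path` that answers for dotted names the lazily-built `shellcraft` module has already materialised in `sys.modules`. The patch uses the legacy `find_module`/`load_module` protocol; the sketch below shows the same mechanism with the modern `find_spec` API on a made-up `toolbox` package, so it is an illustration of the import-hook idea rather than the library's actual code.

```python
# Illustrative sketch only: serving sub-modules of a hypothetical package
# from a sys.meta_path finder, with no files on disk.
import sys
from importlib.abc import Loader, MetaPathFinder
from importlib.machinery import ModuleSpec


class LazyFinder(MetaPathFinder, Loader):
    prefix = "toolbox"

    def find_spec(self, fullname, path=None, target=None):
        if fullname == self.prefix or fullname.startswith(self.prefix + "."):
            return ModuleSpec(fullname, self, is_package=True)
        return None

    def create_module(self, spec):
        return None  # fall back to the default module creation

    def exec_module(self, module):
        module.generated_by = "LazyFinder"  # stand-in for real lazy setup


sys.meta_path.append(LazyFinder())

import toolbox.codegen.arm  # resolved entirely by LazyFinder

print(toolbox.codegen.arm.generated_by)
```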
gh_patches_debug_26773
|
rasdani/github-patches
|
git_diff
|
optuna__optuna-4940
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix `Test` warnings from `BoTorch`
### Motivation
Resolve the following warnings from the tests:
```
tests/terminator_tests/improvement_tests/gp_tests/test_botorch.py: 4 warnings
tests/visualization_tests/test_terminator_improvement.py: 46 warnings
/opt/hostedtoolcache/Python/3.9.18/x64/lib/python3.9/site-packages/botorch/fit.py:139: DeprecationWarning:
`fit_gpytorch_model` is marked for deprecation, consider using `fit_gpytorch_mll` instead.
```
### Suggestion
Look for the proper way to fix this warning (Hopefully, just replacing `fit_gpytorch_model` with `fit_gpytorch_mll`).
### Additional context (optional)
_No response_
</issue>
<code>
[start of optuna/terminator/improvement/gp/botorch.py]
1 from __future__ import annotations
2
3 from typing import Optional
4
5 import numpy as np
6
7 from optuna._imports import try_import
8 from optuna.distributions import _is_distribution_log
9 from optuna.distributions import CategoricalDistribution
10 from optuna.distributions import FloatDistribution
11 from optuna.distributions import IntDistribution
12 from optuna.search_space import intersection_search_space
13 from optuna.terminator.improvement.gp.base import BaseGaussianProcess
14 from optuna.trial._frozen import FrozenTrial
15 from optuna.trial._state import TrialState
16
17
18 with try_import() as _imports:
19 from botorch.fit import fit_gpytorch_model
20 from botorch.models import SingleTaskGP
21 from botorch.models.transforms import Normalize
22 from botorch.models.transforms import Standardize
23 import gpytorch
24 import torch
25
26 __all__ = [
27 "fit_gpytorch_model",
28 "SingleTaskGP",
29 "Normalize",
30 "Standardize",
31 "gpytorch",
32 "torch",
33 ]
34
35
36 class _BoTorchGaussianProcess(BaseGaussianProcess):
37 def __init__(self) -> None:
38 _imports.check()
39
40 self._gp: Optional[SingleTaskGP] = None
41
42 def fit(
43 self,
44 trials: list[FrozenTrial],
45 ) -> None:
46 self._trials = trials
47
48 x, bounds = _convert_trials_to_tensors(trials)
49
50 n_params = x.shape[1]
51
52 y = torch.tensor([trial.value for trial in trials], dtype=torch.float64)
53 y = torch.unsqueeze(y, 1)
54
55 self._gp = SingleTaskGP(
56 x,
57 y,
58 input_transform=Normalize(d=n_params, bounds=bounds),
59 outcome_transform=Standardize(m=1),
60 )
61
62 mll = gpytorch.mlls.ExactMarginalLogLikelihood(self._gp.likelihood, self._gp)
63
64 fit_gpytorch_model(mll)
65
66 def predict_mean_std(
67 self,
68 trials: list[FrozenTrial],
69 ) -> tuple[np.ndarray, np.ndarray]:
70 assert self._gp is not None
71
72 x, _ = _convert_trials_to_tensors(trials)
73
74 with torch.no_grad(), gpytorch.settings.fast_pred_var():
75 posterior = self._gp.posterior(x)
76 mean = posterior.mean
77 variance = posterior.variance
78 std = variance.sqrt()
79
80 return mean.detach().numpy(), std.detach().numpy()
81
82
83 def _convert_trials_to_tensors(trials: list[FrozenTrial]) -> tuple[torch.Tensor, torch.Tensor]:
84 """Convert a list of FrozenTrial objects to tensors inputs and bounds.
85
86 This function assumes the following condition for input trials:
87 - any categorical param is converted to a float or int one;
88 - log is unscaled for any float/int distribution;
89 - the state is COMPLETE for any trial;
90 - direction is MINIMIZE for any trial.
91 """
92 search_space = intersection_search_space(trials)
93 sorted_params = sorted(search_space.keys())
94
95 x = []
96 for trial in trials:
97 assert trial.state == TrialState.COMPLETE
98 x_row = []
99 for param in sorted_params:
100 distribution = search_space[param]
101
102 assert not _is_distribution_log(distribution)
103 assert not isinstance(distribution, CategoricalDistribution)
104
105 param_value = float(trial.params[param])
106 x_row.append(param_value)
107
108 x.append(x_row)
109
110 min_bounds = []
111 max_bounds = []
112 for param, distribution in search_space.items():
113 assert isinstance(distribution, (FloatDistribution, IntDistribution))
114 min_bounds.append(distribution.low)
115 max_bounds.append(distribution.high)
116 bounds = [min_bounds, max_bounds]
117
118 return torch.tensor(x, dtype=torch.float64), torch.tensor(bounds, dtype=torch.float64)
119
[end of optuna/terminator/improvement/gp/botorch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/optuna/terminator/improvement/gp/botorch.py b/optuna/terminator/improvement/gp/botorch.py
--- a/optuna/terminator/improvement/gp/botorch.py
+++ b/optuna/terminator/improvement/gp/botorch.py
@@ -3,6 +3,7 @@
from typing import Optional
import numpy as np
+from packaging import version
from optuna._imports import try_import
from optuna.distributions import _is_distribution_log
@@ -16,15 +17,20 @@
with try_import() as _imports:
- from botorch.fit import fit_gpytorch_model
+ import botorch
from botorch.models import SingleTaskGP
from botorch.models.transforms import Normalize
from botorch.models.transforms import Standardize
import gpytorch
import torch
+ if version.parse(botorch.version.version) < version.parse("0.8.0"):
+ from botorch.fit import fit_gpytorch_model as fit_gpytorch_mll
+ else:
+ from botorch.fit import fit_gpytorch_mll
+
__all__ = [
- "fit_gpytorch_model",
+ "fit_gpytorch_mll",
"SingleTaskGP",
"Normalize",
"Standardize",
@@ -61,7 +67,7 @@
mll = gpytorch.mlls.ExactMarginalLogLikelihood(self._gp.likelihood, self._gp)
- fit_gpytorch_model(mll)
+ fit_gpytorch_mll(mll)
def predict_mean_std(
self,
|
{"golden_diff": "diff --git a/optuna/terminator/improvement/gp/botorch.py b/optuna/terminator/improvement/gp/botorch.py\n--- a/optuna/terminator/improvement/gp/botorch.py\n+++ b/optuna/terminator/improvement/gp/botorch.py\n@@ -3,6 +3,7 @@\n from typing import Optional\n \n import numpy as np\n+from packaging import version\n \n from optuna._imports import try_import\n from optuna.distributions import _is_distribution_log\n@@ -16,15 +17,20 @@\n \n \n with try_import() as _imports:\n- from botorch.fit import fit_gpytorch_model\n+ import botorch\n from botorch.models import SingleTaskGP\n from botorch.models.transforms import Normalize\n from botorch.models.transforms import Standardize\n import gpytorch\n import torch\n \n+ if version.parse(botorch.version.version) < version.parse(\"0.8.0\"):\n+ from botorch.fit import fit_gpytorch_model as fit_gpytorch_mll\n+ else:\n+ from botorch.fit import fit_gpytorch_mll\n+\n __all__ = [\n- \"fit_gpytorch_model\",\n+ \"fit_gpytorch_mll\",\n \"SingleTaskGP\",\n \"Normalize\",\n \"Standardize\",\n@@ -61,7 +67,7 @@\n \n mll = gpytorch.mlls.ExactMarginalLogLikelihood(self._gp.likelihood, self._gp)\n \n- fit_gpytorch_model(mll)\n+ fit_gpytorch_mll(mll)\n \n def predict_mean_std(\n self,\n", "issue": "Fix `Test` warnings from `BoTorch`\n### Motivation\n\nResolve following warnings from test:\r\n```\r\ntests/terminator_tests/improvement_tests/gp_tests/test_botorch.py: 4 warnings\r\ntests/visualization_tests/test_terminator_improvement.py: 46 warnings\r\n /opt/hostedtoolcache/Python/3.9.18/x64/lib/python3.9/site-packages/botorch/fit.py:139: DeprecationWarning:\r\n \r\n `fit_gpytorch_model` is marked for deprecation, consider using `fit_gpytorch_mll` instead.\r\n```\n\n### Suggestion\n\nLook for the proper way to fix this warning (Hopefully, just replacing `fit_gpytorch_model` with `fit_gpytorch_mll`).\n\n### Additional context (optional)\n\n_No response_\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Optional\n\nimport numpy as np\n\nfrom optuna._imports import try_import\nfrom optuna.distributions import _is_distribution_log\nfrom optuna.distributions import CategoricalDistribution\nfrom optuna.distributions import FloatDistribution\nfrom optuna.distributions import IntDistribution\nfrom optuna.search_space import intersection_search_space\nfrom optuna.terminator.improvement.gp.base import BaseGaussianProcess\nfrom optuna.trial._frozen import FrozenTrial\nfrom optuna.trial._state import TrialState\n\n\nwith try_import() as _imports:\n from botorch.fit import fit_gpytorch_model\n from botorch.models import SingleTaskGP\n from botorch.models.transforms import Normalize\n from botorch.models.transforms import Standardize\n import gpytorch\n import torch\n\n__all__ = [\n \"fit_gpytorch_model\",\n \"SingleTaskGP\",\n \"Normalize\",\n \"Standardize\",\n \"gpytorch\",\n \"torch\",\n]\n\n\nclass _BoTorchGaussianProcess(BaseGaussianProcess):\n def __init__(self) -> None:\n _imports.check()\n\n self._gp: Optional[SingleTaskGP] = None\n\n def fit(\n self,\n trials: list[FrozenTrial],\n ) -> None:\n self._trials = trials\n\n x, bounds = _convert_trials_to_tensors(trials)\n\n n_params = x.shape[1]\n\n y = torch.tensor([trial.value for trial in trials], dtype=torch.float64)\n y = torch.unsqueeze(y, 1)\n\n self._gp = SingleTaskGP(\n x,\n y,\n input_transform=Normalize(d=n_params, bounds=bounds),\n outcome_transform=Standardize(m=1),\n )\n\n mll = gpytorch.mlls.ExactMarginalLogLikelihood(self._gp.likelihood, self._gp)\n\n 
fit_gpytorch_model(mll)\n\n def predict_mean_std(\n self,\n trials: list[FrozenTrial],\n ) -> tuple[np.ndarray, np.ndarray]:\n assert self._gp is not None\n\n x, _ = _convert_trials_to_tensors(trials)\n\n with torch.no_grad(), gpytorch.settings.fast_pred_var():\n posterior = self._gp.posterior(x)\n mean = posterior.mean\n variance = posterior.variance\n std = variance.sqrt()\n\n return mean.detach().numpy(), std.detach().numpy()\n\n\ndef _convert_trials_to_tensors(trials: list[FrozenTrial]) -> tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Convert a list of FrozenTrial objects to tensors inputs and bounds.\n\n This function assumes the following condition for input trials:\n - any categorical param is converted to a float or int one;\n - log is unscaled for any float/int distribution;\n - the state is COMPLETE for any trial;\n - direction is MINIMIZE for any trial.\n \"\"\"\n search_space = intersection_search_space(trials)\n sorted_params = sorted(search_space.keys())\n\n x = []\n for trial in trials:\n assert trial.state == TrialState.COMPLETE\n x_row = []\n for param in sorted_params:\n distribution = search_space[param]\n\n assert not _is_distribution_log(distribution)\n assert not isinstance(distribution, CategoricalDistribution)\n\n param_value = float(trial.params[param])\n x_row.append(param_value)\n\n x.append(x_row)\n\n min_bounds = []\n max_bounds = []\n for param, distribution in search_space.items():\n assert isinstance(distribution, (FloatDistribution, IntDistribution))\n min_bounds.append(distribution.low)\n max_bounds.append(distribution.high)\n bounds = [min_bounds, max_bounds]\n\n return torch.tensor(x, dtype=torch.float64), torch.tensor(bounds, dtype=torch.float64)\n", "path": "optuna/terminator/improvement/gp/botorch.py"}]}
| 1,808 | 373 |
gh_patches_debug_40268
|
rasdani/github-patches
|
git_diff
|
Textualize__textual-2607
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Placeholder cycle per app
The placeholder stores the current color in a classvar.
This means that a second app will start where the previous app left off, and may return a different color on its first iteration.
Not much of an issue in practice, but in the docs the colors change with every screenshot. I think we need a separate cycle per app, i.e. an independent color sequence for each app instance.
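One possible shape for this is to key the color iterator on the app instance, so each app starts from the top of the palette. Just a sketch (the `_COLORS_BY_APP` registry and the helper name are made up, and the palette is trimmed for brevity):
```python
from itertools import cycle
from weakref import WeakKeyDictionary

_PLACEHOLDER_BACKGROUND_COLORS = ["#881177", "#aa3355", "#cc6666"]  # trimmed palette

# One color iterator per running App instance instead of a single class-level cycle,
# so a second app is unaffected by how far the first app advanced the sequence.
_COLORS_BY_APP = WeakKeyDictionary()

def next_placeholder_color(app) -> str:
    colors = _COLORS_BY_APP.setdefault(app, cycle(_PLACEHOLDER_BACKGROUND_COLORS))
    return f"{next(colors)} 50%"
```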
</issue>
<code>
[start of docs/examples/styles/width_comparison.py]
1 from textual.app import App
2 from textual.containers import Horizontal
3 from textual.widgets import Placeholder, Label, Static
4
5
6 class Ruler(Static):
7 def compose(self):
8 ruler_text = "····•" * 100
9 yield Label(ruler_text)
10
11
12 class HeightComparisonApp(App):
13 def compose(self):
14 yield Horizontal(
15 Placeholder(id="cells"), # (1)!
16 Placeholder(id="percent"),
17 Placeholder(id="w"),
18 Placeholder(id="h"),
19 Placeholder(id="vw"),
20 Placeholder(id="vh"),
21 Placeholder(id="auto"),
22 Placeholder(id="fr1"),
23 Placeholder(id="fr3"),
24 )
25 yield Ruler()
26
27
28 app = HeightComparisonApp(css_path="width_comparison.css")
29
[end of docs/examples/styles/width_comparison.py]
[start of src/textual/widgets/_placeholder.py]
1 """Provides a Textual placeholder widget; useful when designing an app's layout."""
2
3 from __future__ import annotations
4
5 from itertools import cycle
6
7 from rich.console import RenderableType
8 from typing_extensions import Literal, Self
9
10 from .. import events
11 from ..css._error_tools import friendly_list
12 from ..reactive import Reactive, reactive
13 from ..widget import Widget
14
15 PlaceholderVariant = Literal["default", "size", "text"]
16 """The different variants of placeholder."""
17
18 _VALID_PLACEHOLDER_VARIANTS_ORDERED: list[PlaceholderVariant] = [
19 "default",
20 "size",
21 "text",
22 ]
23 _VALID_PLACEHOLDER_VARIANTS: set[PlaceholderVariant] = set(
24 _VALID_PLACEHOLDER_VARIANTS_ORDERED
25 )
26 _PLACEHOLDER_BACKGROUND_COLORS = [
27 "#881177",
28 "#aa3355",
29 "#cc6666",
30 "#ee9944",
31 "#eedd00",
32 "#99dd55",
33 "#44dd88",
34 "#22ccbb",
35 "#00bbcc",
36 "#0099cc",
37 "#3366bb",
38 "#663399",
39 ]
40 _LOREM_IPSUM_PLACEHOLDER_TEXT = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Etiam feugiat ac elit sit amet accumsan. Suspendisse bibendum nec libero quis gravida. Phasellus id eleifend ligula. Nullam imperdiet sem tellus, sed vehicula nisl faucibus sit amet. Praesent iaculis tempor ultricies. Sed lacinia, tellus id rutrum lacinia, sapien sapien congue mauris, sit amet pellentesque quam quam vel nisl. Curabitur vulputate erat pellentesque mauris posuere, non dictum risus mattis."
41
42
43 class InvalidPlaceholderVariant(Exception):
44 """Raised when an invalid Placeholder variant is set."""
45
46
47 class Placeholder(Widget):
48 """A simple placeholder widget to use before you build your custom widgets.
49
50 This placeholder has a couple of variants that show different data.
51 Clicking the placeholder cycles through the available variants, but a placeholder
52 can also be initialised in a specific variant.
53
54 The variants available are:
55
56 | Variant | Placeholder shows |
57 |---------|------------------------------------------------|
58 | default | Identifier label or the ID of the placeholder. |
59 | size | Size of the placeholder. |
60 | text | Lorem Ipsum text. |
61 """
62
63 DEFAULT_CSS = """
64 Placeholder {
65 content-align: center middle;
66 overflow: hidden;
67 color: $text;
68 }
69 Placeholder.-text {
70 padding: 1;
71 }
72 """
73
74 # Consecutive placeholders get assigned consecutive colors.
75 _COLORS = cycle(_PLACEHOLDER_BACKGROUND_COLORS)
76 _SIZE_RENDER_TEMPLATE = "[b]{} x {}[/b]"
77
78 variant: Reactive[PlaceholderVariant] = reactive[PlaceholderVariant]("default")
79
80 _renderables: dict[PlaceholderVariant, str]
81
82 @classmethod
83 def reset_color_cycle(cls) -> None:
84 """Reset the placeholder background color cycle."""
85 cls._COLORS = cycle(_PLACEHOLDER_BACKGROUND_COLORS)
86
87 def __init__(
88 self,
89 label: str | None = None,
90 variant: PlaceholderVariant = "default",
91 *,
92 name: str | None = None,
93 id: str | None = None,
94 classes: str | None = None,
95 ) -> None:
96 """Create a Placeholder widget.
97
98 Args:
99 label: The label to identify the placeholder.
100 If no label is present, uses the placeholder ID instead.
101 variant: The variant of the placeholder.
102 name: The name of the placeholder.
103 id: The ID of the placeholder in the DOM.
104 classes: A space separated string with the CSS classes
105 of the placeholder, if any.
106 """
107 # Create and cache renderables for all the variants.
108 self._renderables = {
109 "default": label if label else f"#{id}" if id else "Placeholder",
110 "size": "",
111 "text": "\n\n".join(_LOREM_IPSUM_PLACEHOLDER_TEXT for _ in range(5)),
112 }
113
114 super().__init__(name=name, id=id, classes=classes)
115
116 self.styles.background = f"{next(Placeholder._COLORS)} 50%"
117
118 self.variant = self.validate_variant(variant)
119 """The current variant of the placeholder."""
120
121 # Set a cycle through the variants with the correct starting point.
122 self._variants_cycle = cycle(_VALID_PLACEHOLDER_VARIANTS_ORDERED)
123 while next(self._variants_cycle) != self.variant:
124 pass
125
126 def render(self) -> RenderableType:
127 """Render the placeholder.
128
129 Returns:
130 The value to render.
131 """
132 return self._renderables[self.variant]
133
134 def cycle_variant(self) -> Self:
135 """Get the next variant in the cycle.
136
137 Returns:
138 The `Placeholder` instance.
139 """
140 self.variant = next(self._variants_cycle)
141 return self
142
143 def watch_variant(
144 self, old_variant: PlaceholderVariant, variant: PlaceholderVariant
145 ) -> None:
146 self.remove_class(f"-{old_variant}")
147 self.add_class(f"-{variant}")
148
149 def validate_variant(self, variant: PlaceholderVariant) -> PlaceholderVariant:
150 """Validate the variant to which the placeholder was set."""
151 if variant not in _VALID_PLACEHOLDER_VARIANTS:
152 raise InvalidPlaceholderVariant(
153 "Valid placeholder variants are "
154 + f"{friendly_list(_VALID_PLACEHOLDER_VARIANTS)}"
155 )
156 return variant
157
158 async def _on_click(self, _: events.Click) -> None:
159 """Click handler to cycle through the placeholder variants."""
160 self.cycle_variant()
161
162 def _on_resize(self, event: events.Resize) -> None:
163 """Update the placeholder "size" variant with the new placeholder size."""
164 self._renderables["size"] = self._SIZE_RENDER_TEMPLATE.format(*event.size)
165 if self.variant == "size":
166 self.refresh()
167
[end of src/textual/widgets/_placeholder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/examples/styles/width_comparison.py b/docs/examples/styles/width_comparison.py
--- a/docs/examples/styles/width_comparison.py
+++ b/docs/examples/styles/width_comparison.py
@@ -1,6 +1,6 @@
from textual.app import App
from textual.containers import Horizontal
-from textual.widgets import Placeholder, Label, Static
+from textual.widgets import Label, Placeholder, Static
class Ruler(Static):
@@ -9,7 +9,7 @@
yield Label(ruler_text)
-class HeightComparisonApp(App):
+class WidthComparisonApp(App):
def compose(self):
yield Horizontal(
Placeholder(id="cells"), # (1)!
@@ -25,4 +25,6 @@
yield Ruler()
-app = HeightComparisonApp(css_path="width_comparison.css")
+app = WidthComparisonApp(css_path="width_comparison.css")
+if __name__ == "__main__":
+ app.run()
diff --git a/src/textual/widgets/_placeholder.py b/src/textual/widgets/_placeholder.py
--- a/src/textual/widgets/_placeholder.py
+++ b/src/textual/widgets/_placeholder.py
@@ -3,10 +3,14 @@
from __future__ import annotations
from itertools import cycle
+from typing import Iterator
+from weakref import WeakKeyDictionary
from rich.console import RenderableType
from typing_extensions import Literal, Self
+from textual.app import App
+
from .. import events
from ..css._error_tools import friendly_list
from ..reactive import Reactive, reactive
@@ -72,18 +76,13 @@
"""
# Consecutive placeholders get assigned consecutive colors.
- _COLORS = cycle(_PLACEHOLDER_BACKGROUND_COLORS)
+ _COLORS: WeakKeyDictionary[App, Iterator[str]] = WeakKeyDictionary()
_SIZE_RENDER_TEMPLATE = "[b]{} x {}[/b]"
variant: Reactive[PlaceholderVariant] = reactive[PlaceholderVariant]("default")
_renderables: dict[PlaceholderVariant, str]
- @classmethod
- def reset_color_cycle(cls) -> None:
- """Reset the placeholder background color cycle."""
- cls._COLORS = cycle(_PLACEHOLDER_BACKGROUND_COLORS)
-
def __init__(
self,
label: str | None = None,
@@ -113,8 +112,6 @@
super().__init__(name=name, id=id, classes=classes)
- self.styles.background = f"{next(Placeholder._COLORS)} 50%"
-
self.variant = self.validate_variant(variant)
"""The current variant of the placeholder."""
@@ -123,6 +120,13 @@
while next(self._variants_cycle) != self.variant:
pass
+ def on_mount(self) -> None:
+ """Set the color for this placeholder."""
+ colors = Placeholder._COLORS.setdefault(
+ self.app, cycle(_PLACEHOLDER_BACKGROUND_COLORS)
+ )
+ self.styles.background = f"{next(colors)} 50%"
+
def render(self) -> RenderableType:
"""Render the placeholder.
|
{"golden_diff": "diff --git a/docs/examples/styles/width_comparison.py b/docs/examples/styles/width_comparison.py\n--- a/docs/examples/styles/width_comparison.py\n+++ b/docs/examples/styles/width_comparison.py\n@@ -1,6 +1,6 @@\n from textual.app import App\n from textual.containers import Horizontal\n-from textual.widgets import Placeholder, Label, Static\n+from textual.widgets import Label, Placeholder, Static\n \n \n class Ruler(Static):\n@@ -9,7 +9,7 @@\n yield Label(ruler_text)\n \n \n-class HeightComparisonApp(App):\n+class WidthComparisonApp(App):\n def compose(self):\n yield Horizontal(\n Placeholder(id=\"cells\"), # (1)!\n@@ -25,4 +25,6 @@\n yield Ruler()\n \n \n-app = HeightComparisonApp(css_path=\"width_comparison.css\")\n+app = WidthComparisonApp(css_path=\"width_comparison.css\")\n+if __name__ == \"__main__\":\n+ app.run()\ndiff --git a/src/textual/widgets/_placeholder.py b/src/textual/widgets/_placeholder.py\n--- a/src/textual/widgets/_placeholder.py\n+++ b/src/textual/widgets/_placeholder.py\n@@ -3,10 +3,14 @@\n from __future__ import annotations\n \n from itertools import cycle\n+from typing import Iterator\n+from weakref import WeakKeyDictionary\n \n from rich.console import RenderableType\n from typing_extensions import Literal, Self\n \n+from textual.app import App\n+\n from .. import events\n from ..css._error_tools import friendly_list\n from ..reactive import Reactive, reactive\n@@ -72,18 +76,13 @@\n \"\"\"\n \n # Consecutive placeholders get assigned consecutive colors.\n- _COLORS = cycle(_PLACEHOLDER_BACKGROUND_COLORS)\n+ _COLORS: WeakKeyDictionary[App, Iterator[str]] = WeakKeyDictionary()\n _SIZE_RENDER_TEMPLATE = \"[b]{} x {}[/b]\"\n \n variant: Reactive[PlaceholderVariant] = reactive[PlaceholderVariant](\"default\")\n \n _renderables: dict[PlaceholderVariant, str]\n \n- @classmethod\n- def reset_color_cycle(cls) -> None:\n- \"\"\"Reset the placeholder background color cycle.\"\"\"\n- cls._COLORS = cycle(_PLACEHOLDER_BACKGROUND_COLORS)\n-\n def __init__(\n self,\n label: str | None = None,\n@@ -113,8 +112,6 @@\n \n super().__init__(name=name, id=id, classes=classes)\n \n- self.styles.background = f\"{next(Placeholder._COLORS)} 50%\"\n-\n self.variant = self.validate_variant(variant)\n \"\"\"The current variant of the placeholder.\"\"\"\n \n@@ -123,6 +120,13 @@\n while next(self._variants_cycle) != self.variant:\n pass\n \n+ def on_mount(self) -> None:\n+ \"\"\"Set the color for this placeholder.\"\"\"\n+ colors = Placeholder._COLORS.setdefault(\n+ self.app, cycle(_PLACEHOLDER_BACKGROUND_COLORS)\n+ )\n+ self.styles.background = f\"{next(colors)} 50%\"\n+\n def render(self) -> RenderableType:\n \"\"\"Render the placeholder.\n", "issue": "Placeholder cycle per app\nThe placeholder stores the current color in a classvar.\n\nThis means that a second app will start where the previous app left off. And may return a different color from the first iteration.\n\nNot much of an issue in practice, but in the docs the colors change every screenshot. I think we need a separate cycle per app. 
Suggest we have an independent sequence for each app instance.\n", "before_files": [{"content": "from textual.app import App\nfrom textual.containers import Horizontal\nfrom textual.widgets import Placeholder, Label, Static\n\n\nclass Ruler(Static):\n def compose(self):\n ruler_text = \"\u00b7\u00b7\u00b7\u00b7\u2022\" * 100\n yield Label(ruler_text)\n\n\nclass HeightComparisonApp(App):\n def compose(self):\n yield Horizontal(\n Placeholder(id=\"cells\"), # (1)!\n Placeholder(id=\"percent\"),\n Placeholder(id=\"w\"),\n Placeholder(id=\"h\"),\n Placeholder(id=\"vw\"),\n Placeholder(id=\"vh\"),\n Placeholder(id=\"auto\"),\n Placeholder(id=\"fr1\"),\n Placeholder(id=\"fr3\"),\n )\n yield Ruler()\n\n\napp = HeightComparisonApp(css_path=\"width_comparison.css\")\n", "path": "docs/examples/styles/width_comparison.py"}, {"content": "\"\"\"Provides a Textual placeholder widget; useful when designing an app's layout.\"\"\"\n\nfrom __future__ import annotations\n\nfrom itertools import cycle\n\nfrom rich.console import RenderableType\nfrom typing_extensions import Literal, Self\n\nfrom .. import events\nfrom ..css._error_tools import friendly_list\nfrom ..reactive import Reactive, reactive\nfrom ..widget import Widget\n\nPlaceholderVariant = Literal[\"default\", \"size\", \"text\"]\n\"\"\"The different variants of placeholder.\"\"\"\n\n_VALID_PLACEHOLDER_VARIANTS_ORDERED: list[PlaceholderVariant] = [\n \"default\",\n \"size\",\n \"text\",\n]\n_VALID_PLACEHOLDER_VARIANTS: set[PlaceholderVariant] = set(\n _VALID_PLACEHOLDER_VARIANTS_ORDERED\n)\n_PLACEHOLDER_BACKGROUND_COLORS = [\n \"#881177\",\n \"#aa3355\",\n \"#cc6666\",\n \"#ee9944\",\n \"#eedd00\",\n \"#99dd55\",\n \"#44dd88\",\n \"#22ccbb\",\n \"#00bbcc\",\n \"#0099cc\",\n \"#3366bb\",\n \"#663399\",\n]\n_LOREM_IPSUM_PLACEHOLDER_TEXT = \"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Etiam feugiat ac elit sit amet accumsan. Suspendisse bibendum nec libero quis gravida. Phasellus id eleifend ligula. Nullam imperdiet sem tellus, sed vehicula nisl faucibus sit amet. Praesent iaculis tempor ultricies. Sed lacinia, tellus id rutrum lacinia, sapien sapien congue mauris, sit amet pellentesque quam quam vel nisl. Curabitur vulputate erat pellentesque mauris posuere, non dictum risus mattis.\"\n\n\nclass InvalidPlaceholderVariant(Exception):\n \"\"\"Raised when an invalid Placeholder variant is set.\"\"\"\n\n\nclass Placeholder(Widget):\n \"\"\"A simple placeholder widget to use before you build your custom widgets.\n\n This placeholder has a couple of variants that show different data.\n Clicking the placeholder cycles through the available variants, but a placeholder\n can also be initialised in a specific variant.\n\n The variants available are:\n\n | Variant | Placeholder shows |\n |---------|------------------------------------------------|\n | default | Identifier label or the ID of the placeholder. |\n | size | Size of the placeholder. |\n | text | Lorem Ipsum text. 
|\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n Placeholder {\n content-align: center middle;\n overflow: hidden;\n color: $text;\n }\n Placeholder.-text {\n padding: 1;\n }\n \"\"\"\n\n # Consecutive placeholders get assigned consecutive colors.\n _COLORS = cycle(_PLACEHOLDER_BACKGROUND_COLORS)\n _SIZE_RENDER_TEMPLATE = \"[b]{} x {}[/b]\"\n\n variant: Reactive[PlaceholderVariant] = reactive[PlaceholderVariant](\"default\")\n\n _renderables: dict[PlaceholderVariant, str]\n\n @classmethod\n def reset_color_cycle(cls) -> None:\n \"\"\"Reset the placeholder background color cycle.\"\"\"\n cls._COLORS = cycle(_PLACEHOLDER_BACKGROUND_COLORS)\n\n def __init__(\n self,\n label: str | None = None,\n variant: PlaceholderVariant = \"default\",\n *,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n ) -> None:\n \"\"\"Create a Placeholder widget.\n\n Args:\n label: The label to identify the placeholder.\n If no label is present, uses the placeholder ID instead.\n variant: The variant of the placeholder.\n name: The name of the placeholder.\n id: The ID of the placeholder in the DOM.\n classes: A space separated string with the CSS classes\n of the placeholder, if any.\n \"\"\"\n # Create and cache renderables for all the variants.\n self._renderables = {\n \"default\": label if label else f\"#{id}\" if id else \"Placeholder\",\n \"size\": \"\",\n \"text\": \"\\n\\n\".join(_LOREM_IPSUM_PLACEHOLDER_TEXT for _ in range(5)),\n }\n\n super().__init__(name=name, id=id, classes=classes)\n\n self.styles.background = f\"{next(Placeholder._COLORS)} 50%\"\n\n self.variant = self.validate_variant(variant)\n \"\"\"The current variant of the placeholder.\"\"\"\n\n # Set a cycle through the variants with the correct starting point.\n self._variants_cycle = cycle(_VALID_PLACEHOLDER_VARIANTS_ORDERED)\n while next(self._variants_cycle) != self.variant:\n pass\n\n def render(self) -> RenderableType:\n \"\"\"Render the placeholder.\n\n Returns:\n The value to render.\n \"\"\"\n return self._renderables[self.variant]\n\n def cycle_variant(self) -> Self:\n \"\"\"Get the next variant in the cycle.\n\n Returns:\n The `Placeholder` instance.\n \"\"\"\n self.variant = next(self._variants_cycle)\n return self\n\n def watch_variant(\n self, old_variant: PlaceholderVariant, variant: PlaceholderVariant\n ) -> None:\n self.remove_class(f\"-{old_variant}\")\n self.add_class(f\"-{variant}\")\n\n def validate_variant(self, variant: PlaceholderVariant) -> PlaceholderVariant:\n \"\"\"Validate the variant to which the placeholder was set.\"\"\"\n if variant not in _VALID_PLACEHOLDER_VARIANTS:\n raise InvalidPlaceholderVariant(\n \"Valid placeholder variants are \"\n + f\"{friendly_list(_VALID_PLACEHOLDER_VARIANTS)}\"\n )\n return variant\n\n async def _on_click(self, _: events.Click) -> None:\n \"\"\"Click handler to cycle through the placeholder variants.\"\"\"\n self.cycle_variant()\n\n def _on_resize(self, event: events.Resize) -> None:\n \"\"\"Update the placeholder \"size\" variant with the new placeholder size.\"\"\"\n self._renderables[\"size\"] = self._SIZE_RENDER_TEMPLATE.format(*event.size)\n if self.variant == \"size\":\n self.refresh()\n", "path": "src/textual/widgets/_placeholder.py"}]}
| 2,552 | 668 |
gh_patches_debug_24273
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-2727
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
exception in prometheus exporter AttributeError: 'frozenset' object has no attribute 'items'
Hi,
Using the code sample from https://github.com/open-telemetry/opentelemetry-python/blob/v1.12.0rc1/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py
```
from prometheus_client import start_http_server
from opentelemetry.exporter.prometheus import PrometheusMetricReader
from opentelemetry.metrics import get_meter_provider, set_meter_provider
from opentelemetry.sdk.metrics import MeterProvider
import random
start_http_server(port=8000, addr="localhost")
prefix = "MyAppPrefix"
reader = PrometheusMetricReader(prefix)
set_meter_provider(MeterProvider(metric_readers=[reader]))
meter = get_meter_provider().get_meter("myapp", "0.1.2")
counter = meter.create_counter(
"requests",
"requests",
"number of requests",
)
labels = {"environment": "staging"}
counter.add(25, labels)
```
When accessing http://localhost:8000, an exception is thrown:
```
  File "/Users/m_652923/.pyenv/versions/3.8.9/lib/python3.8/wsgiref/handlers.py", line 137, in run
self.result = application(self.environ, self.start_response)
File "/Users/m_652923/.pyenv/versions/otel/lib/python3.8/site-packages/prometheus_client/exposition.py", line 128, in prometheus_app
status, headers, output = _bake_output(registry, accept_header, accept_encoding_header, params, disable_compression)
File "/Users/m_652923/.pyenv/versions/otel/lib/python3.8/site-packages/prometheus_client/exposition.py", line 104, in _bake_output
output = encoder(registry)
File "/Users/m_652923/.pyenv/versions/otel/lib/python3.8/site-packages/prometheus_client/exposition.py", line 197, in generate_latest
for metric in registry.collect():
File "/Users/m_652923/.pyenv/versions/otel/lib/python3.8/site-packages/prometheus_client/registry.py", line 97, in collect
yield from collector.collect()
File "/Users/m_652923/.pyenv/versions/otel/lib/python3.8/site-packages/opentelemetry/exporter/prometheus/__init__.py", line 166, in collect
self._translate_to_prometheus(
File "/Users/m_652923/.pyenv/versions/otel/lib/python3.8/site-packages/opentelemetry/exporter/prometheus/__init__.py", line 204, in _translate_to_prometheus
for key, value in number_data_point.attributes.items():
AttributeError: 'frozenset' object has no attribute 'items'
```
```
$pip freeze
backoff==1.11.1
certifi==2022.5.18.1
charset-normalizer==2.0.12
Deprecated==1.2.13
googleapis-common-protos==1.56.1
grpcio==1.46.3
idna==3.3
opentelemetry-api==1.12.0rc1
opentelemetry-exporter-otlp==1.12.0rc1
opentelemetry-exporter-otlp-proto-grpc==1.12.0rc1
opentelemetry-exporter-otlp-proto-http==1.12.0rc1
opentelemetry-exporter-prometheus==1.12.0rc1
opentelemetry-proto==1.12.0rc1
opentelemetry-sdk==1.12.0rc1
opentelemetry-semantic-conventions==0.31b0
prometheus-client==0.14.1
protobuf==3.20.1
requests==2.27.1
six==1.16.0
typing_extensions==4.2.0
urllib3==1.26.9
wrapt==1.14.1
```
This also happens when I try an `up_down_counter`
```
gauge = meter.create_up_down_counter(
'requests_for_endpoints_ms',
'millis',
'Requests for endpoints in milliseconds'
)
res = random.choice(results)
gauge.add(random.randint(10, 40), {'endpoint': res['endpoint']})
```
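From a quick look at the SDK, the problem seems to be that `_ViewInstrumentMatch.consume_measurement()` rebinds the attributes dict to a `frozenset` (so it can be used as a dictionary key), and that same frozenset then ends up on the data points that the Prometheus exporter iterates over. Keeping the frozenset only as the lookup key would leave the attributes a real dict. A standalone sketch of the idea (the `aggr_key` name and the toy aggregation dict are mine):
```python
# Illustration only: use the frozenset purely as a lookup key and keep the original
# dict around for anything that later needs .items() (e.g. the Prometheus exporter).
measurement_attributes = {"environment": "staging"}

aggr_key = frozenset(measurement_attributes.items())

attributes_aggregation = {}
if aggr_key not in attributes_aggregation:
    attributes_aggregation[aggr_key] = {"attributes": measurement_attributes, "value": 0}
attributes_aggregation[aggr_key]["value"] += 25

# The stored attributes are still a dict, so .items() keeps working downstream.
assert list(attributes_aggregation[aggr_key]["attributes"].items()) == [("environment", "staging")]
```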
</issue>
<code>
[start of opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 from logging import getLogger
17 from threading import Lock
18 from typing import Dict, Iterable
19
20 from opentelemetry.metrics import Instrument
21 from opentelemetry.sdk.metrics._internal.aggregation import (
22 Aggregation,
23 DefaultAggregation,
24 _Aggregation,
25 _SumAggregation,
26 )
27 from opentelemetry.sdk.metrics._internal.export import AggregationTemporality
28 from opentelemetry.sdk.metrics._internal.measurement import Measurement
29 from opentelemetry.sdk.metrics._internal.point import DataPointT
30 from opentelemetry.sdk.metrics._internal.view import View
31 from opentelemetry.util._time import _time_ns
32
33 _logger = getLogger(__name__)
34
35
36 class _ViewInstrumentMatch:
37 def __init__(
38 self,
39 view: View,
40 instrument: Instrument,
41 instrument_class_aggregation: Dict[type, Aggregation],
42 ):
43 self._start_time_unix_nano = _time_ns()
44 self._view = view
45 self._instrument = instrument
46 self._attributes_aggregation: Dict[frozenset, _Aggregation] = {}
47 self._lock = Lock()
48 self._instrument_class_aggregation = instrument_class_aggregation
49 self._name = self._view._name or self._instrument.name
50 self._description = (
51 self._view._description or self._instrument.description
52 )
53 if not isinstance(self._view._aggregation, DefaultAggregation):
54 self._aggregation = self._view._aggregation._create_aggregation(
55 self._instrument, None, 0
56 )
57 else:
58 self._aggregation = self._instrument_class_aggregation[
59 self._instrument.__class__
60 ]._create_aggregation(self._instrument, None, 0)
61
62 def conflicts(self, other: "_ViewInstrumentMatch") -> bool:
63 # pylint: disable=protected-access
64
65 result = (
66 self._name == other._name
67 and self._instrument.unit == other._instrument.unit
68 # The aggregation class is being used here instead of data point
69 # type since they are functionally equivalent.
70 and self._aggregation.__class__ == other._aggregation.__class__
71 )
72 if isinstance(self._aggregation, _SumAggregation):
73 result = (
74 result
75 and self._aggregation._instrument_is_monotonic
76 == other._aggregation._instrument_is_monotonic
77 and self._aggregation._instrument_temporality
78 == other._aggregation._instrument_temporality
79 )
80
81 return result
82
83 # pylint: disable=protected-access
84 def consume_measurement(self, measurement: Measurement) -> None:
85
86 if self._view._attribute_keys is not None:
87
88 attributes = {}
89
90 for key, value in (measurement.attributes or {}).items():
91 if key in self._view._attribute_keys:
92 attributes[key] = value
93 elif measurement.attributes is not None:
94 attributes = measurement.attributes
95 else:
96 attributes = {}
97
98 attributes = frozenset(attributes.items())
99
100 if attributes not in self._attributes_aggregation:
101 with self._lock:
102 if attributes not in self._attributes_aggregation:
103 if not isinstance(
104 self._view._aggregation, DefaultAggregation
105 ):
106 aggregation = (
107 self._view._aggregation._create_aggregation(
108 self._instrument,
109 attributes,
110 self._start_time_unix_nano,
111 )
112 )
113 else:
114 aggregation = self._instrument_class_aggregation[
115 self._instrument.__class__
116 ]._create_aggregation(
117 self._instrument,
118 attributes,
119 self._start_time_unix_nano,
120 )
121 self._attributes_aggregation[attributes] = aggregation
122
123 self._attributes_aggregation[attributes].aggregate(measurement)
124
125 def collect(
126 self,
127 aggregation_temporality: AggregationTemporality,
128 collection_start_nanos: int,
129 ) -> Iterable[DataPointT]:
130
131 with self._lock:
132 for aggregation in self._attributes_aggregation.values():
133 yield aggregation.collect(
134 aggregation_temporality, collection_start_nanos
135 )
136
[end of opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py
--- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py
@@ -95,11 +95,11 @@
else:
attributes = {}
- attributes = frozenset(attributes.items())
+ aggr_key = frozenset(attributes.items())
- if attributes not in self._attributes_aggregation:
+ if aggr_key not in self._attributes_aggregation:
with self._lock:
- if attributes not in self._attributes_aggregation:
+ if aggr_key not in self._attributes_aggregation:
if not isinstance(
self._view._aggregation, DefaultAggregation
):
@@ -118,9 +118,9 @@
attributes,
self._start_time_unix_nano,
)
- self._attributes_aggregation[attributes] = aggregation
+ self._attributes_aggregation[aggr_key] = aggregation
- self._attributes_aggregation[attributes].aggregate(measurement)
+ self._attributes_aggregation[aggr_key].aggregate(measurement)
def collect(
self,
|
{"golden_diff": "diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py\n--- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py\n+++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py\n@@ -95,11 +95,11 @@\n else:\n attributes = {}\n \n- attributes = frozenset(attributes.items())\n+ aggr_key = frozenset(attributes.items())\n \n- if attributes not in self._attributes_aggregation:\n+ if aggr_key not in self._attributes_aggregation:\n with self._lock:\n- if attributes not in self._attributes_aggregation:\n+ if aggr_key not in self._attributes_aggregation:\n if not isinstance(\n self._view._aggregation, DefaultAggregation\n ):\n@@ -118,9 +118,9 @@\n attributes,\n self._start_time_unix_nano,\n )\n- self._attributes_aggregation[attributes] = aggregation\n+ self._attributes_aggregation[aggr_key] = aggregation\n \n- self._attributes_aggregation[attributes].aggregate(measurement)\n+ self._attributes_aggregation[aggr_key].aggregate(measurement)\n \n def collect(\n self,\n", "issue": "exception in prometheus exporter AttributeError: 'frozenset' object has no attribute 'items'\nHi,\r\nUsing the code sample from https://github.com/open-telemetry/opentelemetry-python/blob/v1.12.0rc1/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py \r\n\r\n```\r\nfrom prometheus_client import start_http_server\r\nfrom opentelemetry.exporter.prometheus import PrometheusMetricReader\r\nfrom opentelemetry.metrics import get_meter_provider, set_meter_provider\r\nfrom opentelemetry.sdk.metrics import MeterProvider\r\nimport random\r\n\r\nstart_http_server(port=8000, addr=\"localhost\")\r\n\r\nprefix = \"MyAppPrefix\"\r\nreader = PrometheusMetricReader(prefix)\r\n\r\nset_meter_provider(MeterProvider(metric_readers=[reader]))\r\nmeter = get_meter_provider().get_meter(\"myapp\", \"0.1.2\")\r\ncounter = meter.create_counter(\r\n \"requests\",\r\n \"requests\",\r\n \"number of requests\",\r\n)\r\nlabels = {\"environment\": \"staging\"}\r\ncounter.add(25, labels)\r\n```\r\n\r\nwhen accessing http://localhost:8000 and exception is thrown\r\n` File \"/Users/m_652923/.pyenv/versions/3.8.9/lib/python3.8/wsgiref/handlers.py\", line 137, in run\r\n self.result = application(self.environ, self.start_response)\r\n File \"/Users/m_652923/.pyenv/versions/otel/lib/python3.8/site-packages/prometheus_client/exposition.py\", line 128, in prometheus_app\r\n status, headers, output = _bake_output(registry, accept_header, accept_encoding_header, params, disable_compression)\r\n File \"/Users/m_652923/.pyenv/versions/otel/lib/python3.8/site-packages/prometheus_client/exposition.py\", line 104, in _bake_output\r\n output = encoder(registry)\r\n File \"/Users/m_652923/.pyenv/versions/otel/lib/python3.8/site-packages/prometheus_client/exposition.py\", line 197, in generate_latest\r\n for metric in registry.collect():\r\n File \"/Users/m_652923/.pyenv/versions/otel/lib/python3.8/site-packages/prometheus_client/registry.py\", line 97, in collect\r\n yield from collector.collect()\r\n File \"/Users/m_652923/.pyenv/versions/otel/lib/python3.8/site-packages/opentelemetry/exporter/prometheus/__init__.py\", line 166, in collect\r\n self._translate_to_prometheus(\r\n File \"/Users/m_652923/.pyenv/versions/otel/lib/python3.8/site-packages/opentelemetry/exporter/prometheus/__init__.py\", line 204, in 
_translate_to_prometheus\r\n for key, value in number_data_point.attributes.items():\r\nAttributeError: 'frozenset' object has no attribute 'items'`\r\n\r\n```\r\n$pip freeze\r\nbackoff==1.11.1\r\ncertifi==2022.5.18.1\r\ncharset-normalizer==2.0.12\r\nDeprecated==1.2.13\r\ngoogleapis-common-protos==1.56.1\r\ngrpcio==1.46.3\r\nidna==3.3\r\nopentelemetry-api==1.12.0rc1\r\nopentelemetry-exporter-otlp==1.12.0rc1\r\nopentelemetry-exporter-otlp-proto-grpc==1.12.0rc1\r\nopentelemetry-exporter-otlp-proto-http==1.12.0rc1\r\nopentelemetry-exporter-prometheus==1.12.0rc1\r\nopentelemetry-proto==1.12.0rc1\r\nopentelemetry-sdk==1.12.0rc1\r\nopentelemetry-semantic-conventions==0.31b0\r\nprometheus-client==0.14.1\r\nprotobuf==3.20.1\r\nrequests==2.27.1\r\nsix==1.16.0\r\ntyping_extensions==4.2.0\r\nurllib3==1.26.9\r\nwrapt==1.14.1\r\n```\r\nThis also happens when I try an `up_down_counter`\r\n```\r\ngauge = meter.create_up_down_counter(\r\n 'requests_for_endpoints_ms',\r\n 'millis',\r\n 'Requests for endpoints in milliseconds'\r\n )\r\n\r\nres = random.choice(results)\r\ngauge.add(random.randint(10, 40), {'endpoint': res['endpoint']})\r\n```\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom logging import getLogger\nfrom threading import Lock\nfrom typing import Dict, Iterable\n\nfrom opentelemetry.metrics import Instrument\nfrom opentelemetry.sdk.metrics._internal.aggregation import (\n Aggregation,\n DefaultAggregation,\n _Aggregation,\n _SumAggregation,\n)\nfrom opentelemetry.sdk.metrics._internal.export import AggregationTemporality\nfrom opentelemetry.sdk.metrics._internal.measurement import Measurement\nfrom opentelemetry.sdk.metrics._internal.point import DataPointT\nfrom opentelemetry.sdk.metrics._internal.view import View\nfrom opentelemetry.util._time import _time_ns\n\n_logger = getLogger(__name__)\n\n\nclass _ViewInstrumentMatch:\n def __init__(\n self,\n view: View,\n instrument: Instrument,\n instrument_class_aggregation: Dict[type, Aggregation],\n ):\n self._start_time_unix_nano = _time_ns()\n self._view = view\n self._instrument = instrument\n self._attributes_aggregation: Dict[frozenset, _Aggregation] = {}\n self._lock = Lock()\n self._instrument_class_aggregation = instrument_class_aggregation\n self._name = self._view._name or self._instrument.name\n self._description = (\n self._view._description or self._instrument.description\n )\n if not isinstance(self._view._aggregation, DefaultAggregation):\n self._aggregation = self._view._aggregation._create_aggregation(\n self._instrument, None, 0\n )\n else:\n self._aggregation = self._instrument_class_aggregation[\n self._instrument.__class__\n ]._create_aggregation(self._instrument, None, 0)\n\n def conflicts(self, other: \"_ViewInstrumentMatch\") -> bool:\n # pylint: disable=protected-access\n\n result = (\n self._name == other._name\n and self._instrument.unit == other._instrument.unit\n # The aggregation class is being used here instead of data point\n # type 
since they are functionally equivalent.\n and self._aggregation.__class__ == other._aggregation.__class__\n )\n if isinstance(self._aggregation, _SumAggregation):\n result = (\n result\n and self._aggregation._instrument_is_monotonic\n == other._aggregation._instrument_is_monotonic\n and self._aggregation._instrument_temporality\n == other._aggregation._instrument_temporality\n )\n\n return result\n\n # pylint: disable=protected-access\n def consume_measurement(self, measurement: Measurement) -> None:\n\n if self._view._attribute_keys is not None:\n\n attributes = {}\n\n for key, value in (measurement.attributes or {}).items():\n if key in self._view._attribute_keys:\n attributes[key] = value\n elif measurement.attributes is not None:\n attributes = measurement.attributes\n else:\n attributes = {}\n\n attributes = frozenset(attributes.items())\n\n if attributes not in self._attributes_aggregation:\n with self._lock:\n if attributes not in self._attributes_aggregation:\n if not isinstance(\n self._view._aggregation, DefaultAggregation\n ):\n aggregation = (\n self._view._aggregation._create_aggregation(\n self._instrument,\n attributes,\n self._start_time_unix_nano,\n )\n )\n else:\n aggregation = self._instrument_class_aggregation[\n self._instrument.__class__\n ]._create_aggregation(\n self._instrument,\n attributes,\n self._start_time_unix_nano,\n )\n self._attributes_aggregation[attributes] = aggregation\n\n self._attributes_aggregation[attributes].aggregate(measurement)\n\n def collect(\n self,\n aggregation_temporality: AggregationTemporality,\n collection_start_nanos: int,\n ) -> Iterable[DataPointT]:\n\n with self._lock:\n for aggregation in self._attributes_aggregation.values():\n yield aggregation.collect(\n aggregation_temporality, collection_start_nanos\n )\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py"}]}
| 2,870 | 304 |
gh_patches_debug_17033
|
rasdani/github-patches
|
git_diff
|
HybirdCorp__creme_crm-233
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
management command 'geolocation --stat' failed
Really not a blocking bug, but it seems very easy to fix:
```
self.sysout(f'{Town.objects.count()} town(s) in database.')
TypeError: sysout() missing 1 required positional argument: 'visible'
```
There is just a missing argument (the `visible` flag) in the `self.sysout()` call.
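Passing the verbosity through, along the lines of this sketch (untested), should be enough:
```python
# Hypothetical shape of the fix: give print_stats() the verbosity and forward it as
# the `visible` flag, the same way the other helpers in this command already do.
def print_stats(self, verbosity=0):
    self.sysout(f'{Town.objects.count()} town(s) in database.', verbosity > 0)
```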
</issue>
<code>
[start of creme/geolocation/management/commands/geolocation.py]
1 # -*- coding: utf-8 -*-
2
3 ################################################################################
4 # Creme is a free/open-source Customer Relationship Management software
5 # Copyright (C) 2015-2021 Hybird
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
16 #
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
19 ################################################################################
20
21 import csv
22 import io
23 import logging
24 from functools import partial
25 from urllib.request import urlopen
26 from zipfile import ZipFile
27
28 from django.conf import settings
29 from django.core.management.base import BaseCommand
30 from django.db import transaction
31 from django.template.defaultfilters import slugify
32
33 from creme.creme_core.utils.chunktools import iter_as_chunk
34 from creme.creme_core.utils.collections import OrderedSet
35 from creme.creme_core.utils.url import parse_path
36 from creme.persons import get_address_model
37
38 from ...models import GeoAddress, Town
39
40 logger = logging.getLogger(__name__)
41
42
43 class CSVPopulatorError(Exception):
44 pass
45
46
47 class CSVPopulator:
48 class ProtocolError(CSVPopulatorError):
49 pass
50
51 class ReadError(CSVPopulatorError):
52 pass
53
54 class ParseError(CSVPopulatorError):
55 pass
56
57 class Context:
58 def __init__(self, defaults):
59 self.line = 1
60 self.defaults = defaults
61
62 def __init__(self, columns, defaults=None, chunksize=50):
63 """Constructor.
64 @param columns: Names of columns to extract from CSV file.
65 Raises an error if a column is neither in file nor in defaults.
66 @param defaults: dict of default values.
67 @param chunksize: Number of lines in same transaction.
68 By default sqlite supports 999 entries for each transaction,
69 so use 999/fields as max chunksize value.
70 """
71 self.columns = columns
72 self.defaults = defaults or {}
73 self.chunksize = chunksize
74
75 def _get_source_file(self, url_info):
76 if url_info.scheme in {'file', ''}:
77 self.info(f'Reading database from {url_info.geturl()}...')
78 return open(url_info.path, 'rb')
79 elif url_info.scheme in {'http', 'https'}:
80 self.info(f'Downloading database from {url_info.geturl()}...')
81 return urlopen(url_info.geturl())
82 else:
83 raise self.ProtocolError(
84 f'Unable to open CSV data from {url_info.geturl()} : '
85 f'unsupported protocol.'
86 )
87
88 def _mapper(self, header):
89 columns = self.columns
90 defaults = self.defaults
91
92 column_keys = OrderedSet(h.lower() for h in columns) # TODO: OrderedFrozenSet
93 row_keys = frozenset(k.lower() for k in header)
94
95 missings = []
96 constants = {}
97 indices = [(key, index) for index, key in enumerate(header) if key in column_keys]
98
99 for key in column_keys:
100 if key not in row_keys:
101 try:
102 constants[key] = defaults[key]
103 except KeyError:
104 missings.append(key)
105
106 if missings:
107 raise self.ParseError(
108 f"Following columns are missing and haven't got any default value : {missings}"
109 )
110
111 def _aux(row):
112 data = {key: row[index] or defaults.get(key) for key, index in indices}
113 data.update(constants)
114 return data
115
116 return _aux
117
118 def create(self, row, context):
119 raise NotImplementedError
120
121 def save(self, entries, context):
122 raise NotImplementedError
123
124 def pre(self, rows, context):
125 pass
126
127 def post(self, entries, context):
128 pass
129
130 def line_error(self, e, row, context):
131 pass
132
133 def chunk_error(self, e, rows, context):
134 pass
135
136 def info(self, message):
137 logger.info(message)
138
139 def populate(self, source):
140 if isinstance(source, str):
141 try:
142 url_info = parse_path(source)
143
144 with self._get_source_file(url_info) as bytes_input:
145 if url_info.path.endswith('.zip'):
146 archive = ZipFile(
147 bytes_input
148 if bytes_input.seekable() else
149 io.BytesIO(bytes_input.read())
150 )
151
152 with archive.open(archive.namelist()[0]) as zipped_bytes_input:
153 self._populate_from_bytes(zipped_bytes_input)
154 else:
155 self._populate_from_bytes(bytes_input)
156 except CSVPopulatorError:
157 raise
158 except Exception as e:
159 raise self.ReadError(f'Unable to open CSV data from {source} : {e}') from e
160 elif hasattr(source, '__iter__'):
161 self._populate_from_lines(iter(source))
162 else:
163 raise ValueError('The source must be a path or an iterable.')
164
165 def _populate_from_bytes(self, bytes_input):
166 with io.TextIOWrapper(bytes_input) as wrapped_bytes_input:
167 self._populate_from_lines(csv.reader(wrapped_bytes_input))
168
169 def _populate_from_lines(self, lines):
170 mapper = self._mapper(next(lines))
171 context = self.Context(self.defaults)
172
173 for rows in iter_as_chunk(lines, self.chunksize):
174 entries = []
175
176 if mapper:
177 rows = [mapper(row) for row in rows]
178
179 try:
180 self.pre(rows, context)
181
182 for row in rows:
183 try:
184 entries.extend(self.create(row, context))
185 except Exception as e:
186 self.line_error(e, row, context)
187
188 context.line += 1
189
190 self.save(entries, context)
191 self.post(entries, context)
192 except Exception as e:
193 self.chunk_error(e, rows, context)
194
195 def sync(self, model, entries, build_pk):
196 created = []
197 updated = []
198
199 for t in entries:
200 pk = build_pk(t)
201
202 if not pk:
203 created.append(t)
204 else:
205 t.pk = pk
206 updated.append(t)
207
208 with transaction.atomic():
209 model.objects.bulk_create(created)
210
211 # TODO: bulk_update() ?
212 for entry in updated:
213 entry.save(force_update=True)
214
215
216 class CSVTownPopulator(CSVPopulator):
217 def __init__(self, defaults=None, chunksize=100):
218 super().__init__(['title', 'zipcode', 'latitude', 'longitude', 'country'],
219 defaults=defaults, chunksize=chunksize,
220 )
221
222 def line_error(self, e, row, context):
223 logger.error(' invalid data (line %d) : %s', context.line, e)
224
225 def chunk_error(self, e, rows, context):
226 logger.error(' invalid data chunk : %s', e)
227
228 def create(self, row, context):
229 zipcodes = row['zipcode'].split('-')
230
231 name = row['title']
232 latitude = row['latitude']
233 longitude = row['longitude']
234
235 slug = slugify(name)
236 country = row['country']
237
238 build_town = partial(Town, country=country)
239
240 return [build_town(name=name,
241 slug=slug,
242 zipcode=zipcode,
243 latitude=latitude,
244 longitude=longitude,
245 ) for zipcode in zipcodes
246 ]
247
248 def save(self, entries, context):
249 get_existing_pk_by_zipcode = dict(
250 Town.objects.filter(
251 zipcode__in=(t.zipcode for t in entries),
252 slug__in=(t.slug for t in entries),
253 ).values_list('zipcode', 'pk')
254 ).get
255
256 self.sync(
257 Town, entries,
258 lambda town: get_existing_pk_by_zipcode(town.zipcode),
259 )
260
261
262 class Command(BaseCommand):
263 def add_arguments(self, parser):
264 add_argument = parser.add_argument
265 add_argument(
266 '-p', '--populate', action='store_true', dest='populate',
267 help='Populate addresses', default=False,
268 )
269 add_argument(
270 '-s', '--stat', action='store_true', dest='stats',
271 help='Display geolocation database stats', default=False,
272 )
273 add_argument(
274 '-i', '--import', action='store_true', dest='import', default=False,
275 help='Import towns configured in GEOLOCATION_TOWNS setting',
276 )
277
278 def sysout(self, message, visible):
279 if visible:
280 self.stdout.write(message)
281
282 def syserr(self, message):
283 self.stderr.write(message)
284
285 def populate_addresses(self, verbosity=0):
286 self.sysout('Populate geolocation information of addresses...', verbosity > 0)
287 GeoAddress.populate_geoaddresses(get_address_model().objects.exclude(zipcode='', city=''))
288
289 def import_town_database(self, url, defaults):
290 try:
291 CSVTownPopulator(defaults=defaults).populate(url)
292 except Exception as e:
293 self.syserr(str(e))
294
295 def import_town_all(self, verbosity=0):
296 self.sysout('Importing Towns database...', verbosity > 0)
297
298 for url, defaults in settings.GEOLOCATION_TOWNS:
299 self.sysout(url, verbosity > 1)
300 self.import_town_database(url, defaults)
301
302 def print_stats(self):
303 self.sysout(f'{Town.objects.count()} town(s) in database.')
304
305 def handle(self, *args, **options):
306 populate = options.get('populate')
307 stats = options.get('stats')
308 imports = options.get('import')
309 verbosity = options.get('verbosity')
310
311 if stats:
312 self.print_stats()
313
314 if imports:
315 self.import_town_all(verbosity)
316
317 if populate:
318 self.populate_addresses(verbosity)
319
[end of creme/geolocation/management/commands/geolocation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/creme/geolocation/management/commands/geolocation.py b/creme/geolocation/management/commands/geolocation.py
--- a/creme/geolocation/management/commands/geolocation.py
+++ b/creme/geolocation/management/commands/geolocation.py
@@ -299,8 +299,8 @@
self.sysout(url, verbosity > 1)
self.import_town_database(url, defaults)
- def print_stats(self):
- self.sysout(f'{Town.objects.count()} town(s) in database.')
+ def print_stats(self, verbosity=0):
+ self.sysout(f'{Town.objects.count()} town(s) in database.', verbosity > 0)
def handle(self, *args, **options):
populate = options.get('populate')
@@ -309,7 +309,7 @@
verbosity = options.get('verbosity')
if stats:
- self.print_stats()
+ self.print_stats(verbosity)
if imports:
self.import_town_all(verbosity)
|
{"golden_diff": "diff --git a/creme/geolocation/management/commands/geolocation.py b/creme/geolocation/management/commands/geolocation.py\n--- a/creme/geolocation/management/commands/geolocation.py\n+++ b/creme/geolocation/management/commands/geolocation.py\n@@ -299,8 +299,8 @@\n self.sysout(url, verbosity > 1)\n self.import_town_database(url, defaults)\n \n- def print_stats(self):\n- self.sysout(f'{Town.objects.count()} town(s) in database.')\n+ def print_stats(self, verbosity=0):\n+ self.sysout(f'{Town.objects.count()} town(s) in database.', verbosity > 0)\n \n def handle(self, *args, **options):\n populate = options.get('populate')\n@@ -309,7 +309,7 @@\n verbosity = options.get('verbosity')\n \n if stats:\n- self.print_stats()\n+ self.print_stats(verbosity)\n \n if imports:\n self.import_town_all(verbosity)\n", "issue": "management command 'geolocation --stat' failed\nreally not a blocking bug, but seems very easy to fix:\r\n```\r\n self.sysout(f'{Town.objects.count()} town(s) in database.')\r\nTypeError: sysout() missing 1 required positional argument: 'visible'\r\n```\r\n\r\nthere is just a missing argument in self.sysout()\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n################################################################################\n# Creme is a free/open-source Customer Relationship Management software\n# Copyright (C) 2015-2021 Hybird\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n################################################################################\n\nimport csv\nimport io\nimport logging\nfrom functools import partial\nfrom urllib.request import urlopen\nfrom zipfile import ZipFile\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\nfrom django.template.defaultfilters import slugify\n\nfrom creme.creme_core.utils.chunktools import iter_as_chunk\nfrom creme.creme_core.utils.collections import OrderedSet\nfrom creme.creme_core.utils.url import parse_path\nfrom creme.persons import get_address_model\n\nfrom ...models import GeoAddress, Town\n\nlogger = logging.getLogger(__name__)\n\n\nclass CSVPopulatorError(Exception):\n pass\n\n\nclass CSVPopulator:\n class ProtocolError(CSVPopulatorError):\n pass\n\n class ReadError(CSVPopulatorError):\n pass\n\n class ParseError(CSVPopulatorError):\n pass\n\n class Context:\n def __init__(self, defaults):\n self.line = 1\n self.defaults = defaults\n\n def __init__(self, columns, defaults=None, chunksize=50):\n \"\"\"Constructor.\n @param columns: Names of columns to extract from CSV file.\n Raises an error if a column is neither in file nor in defaults.\n @param defaults: dict of default values.\n @param chunksize: Number of lines in same transaction.\n By default sqlite supports 999 entries for each transaction,\n so use 999/fields as max chunksize value.\n \"\"\"\n self.columns = columns\n self.defaults = defaults or {}\n self.chunksize = chunksize\n\n def _get_source_file(self, url_info):\n if url_info.scheme in {'file', ''}:\n self.info(f'Reading database from {url_info.geturl()}...')\n return open(url_info.path, 'rb')\n elif url_info.scheme in {'http', 'https'}:\n self.info(f'Downloading database from {url_info.geturl()}...')\n return urlopen(url_info.geturl())\n else:\n raise self.ProtocolError(\n f'Unable to open CSV data from {url_info.geturl()} : '\n f'unsupported protocol.'\n )\n\n def _mapper(self, header):\n columns = self.columns\n defaults = self.defaults\n\n column_keys = OrderedSet(h.lower() for h in columns) # TODO: OrderedFrozenSet\n row_keys = frozenset(k.lower() for k in header)\n\n missings = []\n constants = {}\n indices = [(key, index) for index, key in enumerate(header) if key in column_keys]\n\n for key in column_keys:\n if key not in row_keys:\n try:\n constants[key] = defaults[key]\n except KeyError:\n missings.append(key)\n\n if missings:\n raise self.ParseError(\n f\"Following columns are missing and haven't got any default value : {missings}\"\n )\n\n def _aux(row):\n data = {key: row[index] or defaults.get(key) for key, index in indices}\n data.update(constants)\n return data\n\n return _aux\n\n def create(self, row, context):\n raise NotImplementedError\n\n def save(self, entries, context):\n raise NotImplementedError\n\n def pre(self, rows, context):\n pass\n\n def post(self, entries, context):\n pass\n\n def line_error(self, e, row, context):\n pass\n\n def chunk_error(self, e, rows, context):\n pass\n\n def info(self, message):\n logger.info(message)\n\n def populate(self, source):\n if isinstance(source, str):\n try:\n url_info = parse_path(source)\n\n with self._get_source_file(url_info) as bytes_input:\n if url_info.path.endswith('.zip'):\n archive = ZipFile(\n bytes_input\n if bytes_input.seekable() else\n io.BytesIO(bytes_input.read())\n )\n\n with archive.open(archive.namelist()[0]) as zipped_bytes_input:\n self._populate_from_bytes(zipped_bytes_input)\n else:\n 
self._populate_from_bytes(bytes_input)\n except CSVPopulatorError:\n raise\n except Exception as e:\n raise self.ReadError(f'Unable to open CSV data from {source} : {e}') from e\n elif hasattr(source, '__iter__'):\n self._populate_from_lines(iter(source))\n else:\n raise ValueError('The source must be a path or an iterable.')\n\n def _populate_from_bytes(self, bytes_input):\n with io.TextIOWrapper(bytes_input) as wrapped_bytes_input:\n self._populate_from_lines(csv.reader(wrapped_bytes_input))\n\n def _populate_from_lines(self, lines):\n mapper = self._mapper(next(lines))\n context = self.Context(self.defaults)\n\n for rows in iter_as_chunk(lines, self.chunksize):\n entries = []\n\n if mapper:\n rows = [mapper(row) for row in rows]\n\n try:\n self.pre(rows, context)\n\n for row in rows:\n try:\n entries.extend(self.create(row, context))\n except Exception as e:\n self.line_error(e, row, context)\n\n context.line += 1\n\n self.save(entries, context)\n self.post(entries, context)\n except Exception as e:\n self.chunk_error(e, rows, context)\n\n def sync(self, model, entries, build_pk):\n created = []\n updated = []\n\n for t in entries:\n pk = build_pk(t)\n\n if not pk:\n created.append(t)\n else:\n t.pk = pk\n updated.append(t)\n\n with transaction.atomic():\n model.objects.bulk_create(created)\n\n # TODO: bulk_update() ?\n for entry in updated:\n entry.save(force_update=True)\n\n\nclass CSVTownPopulator(CSVPopulator):\n def __init__(self, defaults=None, chunksize=100):\n super().__init__(['title', 'zipcode', 'latitude', 'longitude', 'country'],\n defaults=defaults, chunksize=chunksize,\n )\n\n def line_error(self, e, row, context):\n logger.error(' invalid data (line %d) : %s', context.line, e)\n\n def chunk_error(self, e, rows, context):\n logger.error(' invalid data chunk : %s', e)\n\n def create(self, row, context):\n zipcodes = row['zipcode'].split('-')\n\n name = row['title']\n latitude = row['latitude']\n longitude = row['longitude']\n\n slug = slugify(name)\n country = row['country']\n\n build_town = partial(Town, country=country)\n\n return [build_town(name=name,\n slug=slug,\n zipcode=zipcode,\n latitude=latitude,\n longitude=longitude,\n ) for zipcode in zipcodes\n ]\n\n def save(self, entries, context):\n get_existing_pk_by_zipcode = dict(\n Town.objects.filter(\n zipcode__in=(t.zipcode for t in entries),\n slug__in=(t.slug for t in entries),\n ).values_list('zipcode', 'pk')\n ).get\n\n self.sync(\n Town, entries,\n lambda town: get_existing_pk_by_zipcode(town.zipcode),\n )\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n add_argument = parser.add_argument\n add_argument(\n '-p', '--populate', action='store_true', dest='populate',\n help='Populate addresses', default=False,\n )\n add_argument(\n '-s', '--stat', action='store_true', dest='stats',\n help='Display geolocation database stats', default=False,\n )\n add_argument(\n '-i', '--import', action='store_true', dest='import', default=False,\n help='Import towns configured in GEOLOCATION_TOWNS setting',\n )\n\n def sysout(self, message, visible):\n if visible:\n self.stdout.write(message)\n\n def syserr(self, message):\n self.stderr.write(message)\n\n def populate_addresses(self, verbosity=0):\n self.sysout('Populate geolocation information of addresses...', verbosity > 0)\n GeoAddress.populate_geoaddresses(get_address_model().objects.exclude(zipcode='', city=''))\n\n def import_town_database(self, url, defaults):\n try:\n CSVTownPopulator(defaults=defaults).populate(url)\n except Exception as e:\n 
self.syserr(str(e))\n\n def import_town_all(self, verbosity=0):\n self.sysout('Importing Towns database...', verbosity > 0)\n\n for url, defaults in settings.GEOLOCATION_TOWNS:\n self.sysout(url, verbosity > 1)\n self.import_town_database(url, defaults)\n\n def print_stats(self):\n self.sysout(f'{Town.objects.count()} town(s) in database.')\n\n def handle(self, *args, **options):\n populate = options.get('populate')\n stats = options.get('stats')\n imports = options.get('import')\n verbosity = options.get('verbosity')\n\n if stats:\n self.print_stats()\n\n if imports:\n self.import_town_all(verbosity)\n\n if populate:\n self.populate_addresses(verbosity)\n", "path": "creme/geolocation/management/commands/geolocation.py"}]}
| 3,660 | 228 |
gh_patches_debug_8875
|
rasdani/github-patches
|
git_diff
|
microsoft__botbuilder-python-1401
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add tests for SkillHttpClient
see dotnet and javascript imp
[enhancement]
</issue>
<code>
[start of libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from logging import Logger
5
6 from botbuilder.core import InvokeResponse
7 from botbuilder.integration.aiohttp import BotFrameworkHttpClient
8 from botbuilder.core.skills import (
9 ConversationIdFactoryBase,
10 SkillConversationIdFactoryOptions,
11 BotFrameworkSkill,
12 )
13 from botbuilder.schema import Activity
14 from botframework.connector.auth import (
15 AuthenticationConstants,
16 ChannelProvider,
17 GovernmentConstants,
18 SimpleCredentialProvider,
19 )
20
21
22 class SkillHttpClient(BotFrameworkHttpClient):
23 def __init__(
24 self,
25 credential_provider: SimpleCredentialProvider,
26 skill_conversation_id_factory: ConversationIdFactoryBase,
27 channel_provider: ChannelProvider = None,
28 logger: Logger = None,
29 ):
30 if not skill_conversation_id_factory:
31 raise TypeError(
32 "SkillHttpClient(): skill_conversation_id_factory can't be None"
33 )
34
35 super().__init__(credential_provider)
36
37 self._skill_conversation_id_factory = skill_conversation_id_factory
38 self._channel_provider = channel_provider
39
40 async def post_activity_to_skill(
41 self,
42 from_bot_id: str,
43 to_skill: BotFrameworkSkill,
44 service_url: str,
45 activity: Activity,
46 originating_audience: str = None,
47 ) -> InvokeResponse:
48
49 if originating_audience is None:
50 originating_audience = (
51 GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE
52 if self._channel_provider is not None
53 and self._channel_provider.IsGovernment()
54 else AuthenticationConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE
55 )
56
57 options = SkillConversationIdFactoryOptions(
58 from_bot_oauth_scope=originating_audience,
59 from_bot_id=from_bot_id,
60 activity=activity,
61 bot_framework_skill=to_skill,
62 )
63
64 skill_conversation_id = await self._skill_conversation_id_factory.create_skill_conversation_id(
65 options
66 )
67
68 return await super().post_activity(
69 from_bot_id,
70 to_skill.app_id,
71 to_skill.skill_endpoint,
72 service_url,
73 skill_conversation_id,
74 activity,
75 )
76
[end of libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py
--- a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py
+++ b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py
@@ -50,7 +50,7 @@
originating_audience = (
GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE
if self._channel_provider is not None
- and self._channel_provider.IsGovernment()
+ and self._channel_provider.is_government()
else AuthenticationConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE
)
|
{"golden_diff": "diff --git a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py\n--- a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py\n+++ b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py\n@@ -50,7 +50,7 @@\n originating_audience = (\n GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE\n if self._channel_provider is not None\n- and self._channel_provider.IsGovernment()\n+ and self._channel_provider.is_government()\n else AuthenticationConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE\n )\n", "issue": "Add tests for SkillHttpClient\nsee dotnet and javascript imp\r\n\r\n[enhancement]\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom logging import Logger\n\nfrom botbuilder.core import InvokeResponse\nfrom botbuilder.integration.aiohttp import BotFrameworkHttpClient\nfrom botbuilder.core.skills import (\n ConversationIdFactoryBase,\n SkillConversationIdFactoryOptions,\n BotFrameworkSkill,\n)\nfrom botbuilder.schema import Activity\nfrom botframework.connector.auth import (\n AuthenticationConstants,\n ChannelProvider,\n GovernmentConstants,\n SimpleCredentialProvider,\n)\n\n\nclass SkillHttpClient(BotFrameworkHttpClient):\n def __init__(\n self,\n credential_provider: SimpleCredentialProvider,\n skill_conversation_id_factory: ConversationIdFactoryBase,\n channel_provider: ChannelProvider = None,\n logger: Logger = None,\n ):\n if not skill_conversation_id_factory:\n raise TypeError(\n \"SkillHttpClient(): skill_conversation_id_factory can't be None\"\n )\n\n super().__init__(credential_provider)\n\n self._skill_conversation_id_factory = skill_conversation_id_factory\n self._channel_provider = channel_provider\n\n async def post_activity_to_skill(\n self,\n from_bot_id: str,\n to_skill: BotFrameworkSkill,\n service_url: str,\n activity: Activity,\n originating_audience: str = None,\n ) -> InvokeResponse:\n\n if originating_audience is None:\n originating_audience = (\n GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE\n if self._channel_provider is not None\n and self._channel_provider.IsGovernment()\n else AuthenticationConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE\n )\n\n options = SkillConversationIdFactoryOptions(\n from_bot_oauth_scope=originating_audience,\n from_bot_id=from_bot_id,\n activity=activity,\n bot_framework_skill=to_skill,\n )\n\n skill_conversation_id = await self._skill_conversation_id_factory.create_skill_conversation_id(\n options\n )\n\n return await super().post_activity(\n from_bot_id,\n to_skill.app_id,\n to_skill.skill_endpoint,\n service_url,\n skill_conversation_id,\n activity,\n )\n", "path": "libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py"}]}
| 1,189 | 189 |
gh_patches_debug_1421
|
rasdani/github-patches
|
git_diff
|
beeware__toga-2582
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dialog windows are not modal
### Describe the bug
Create a dialog like this:
```
async def on_button_test(widget):
await self.main_window.info_dialog(title="Dialog", message="An Info Dialog")
```
You can now click on the main window behind the dialog and the main window is activated although the dialog is still floating above the window. This allows the dialog to be shown again or other actions to be triggered although the user should be blocked in the dialog.
Either change the behavior of dialogs to be modal or add a "modal={True|False}" property to "info_dialog" (and all similar functions) to enforce this.
### Steps to reproduce
1) Create self.main_window.info_dialog
2) Click main window
3) Main window is activated and can be interacted with.
### Expected behavior
Main window can not be activated (dialog keeps focus)
### Screenshots
_No response_
### Environment
Linux (GenToo). Toga in virtual environment.
### Logs
_No response_
### Additional context
_No response_
</issue>
<code>
[start of gtk/src/toga_gtk/dialogs.py]
1 from abc import ABC
2 from pathlib import Path
3
4 from .libs import Gtk
5
6
7 class BaseDialog(ABC):
8 def __init__(self, interface):
9 self.interface = interface
10 self.interface._impl = self
11
12
13 class MessageDialog(BaseDialog):
14 def __init__(
15 self,
16 interface,
17 title,
18 message_type,
19 buttons,
20 success_result=None,
21 **kwargs,
22 ):
23 super().__init__(interface=interface)
24 self.success_result = success_result
25
26 self.native = Gtk.MessageDialog(
27 transient_for=interface.window._impl.native,
28 flags=0,
29 message_type=message_type,
30 buttons=buttons,
31 text=title,
32 )
33 self.build_dialog(**kwargs)
34
35 self.native.connect("response", self.gtk_response)
36 self.native.show()
37
38 def build_dialog(self, message):
39 self.native.format_secondary_text(message)
40
41 def gtk_response(self, dialog, response):
42 if self.success_result:
43 result = response == self.success_result
44 else:
45 result = None
46
47 self.interface.set_result(result)
48
49 self.native.destroy()
50
51
52 class InfoDialog(MessageDialog):
53 def __init__(self, interface, title, message):
54 super().__init__(
55 interface=interface,
56 title=title,
57 message=message,
58 message_type=Gtk.MessageType.INFO,
59 buttons=Gtk.ButtonsType.OK,
60 )
61
62
63 class QuestionDialog(MessageDialog):
64 def __init__(self, interface, title, message):
65 super().__init__(
66 interface=interface,
67 title=title,
68 message=message,
69 message_type=Gtk.MessageType.QUESTION,
70 buttons=Gtk.ButtonsType.YES_NO,
71 success_result=Gtk.ResponseType.YES,
72 )
73
74
75 class ConfirmDialog(MessageDialog):
76 def __init__(self, interface, title, message):
77 super().__init__(
78 interface=interface,
79 title=title,
80 message=message,
81 message_type=Gtk.MessageType.WARNING,
82 buttons=Gtk.ButtonsType.OK_CANCEL,
83 success_result=Gtk.ResponseType.OK,
84 )
85
86
87 class ErrorDialog(MessageDialog):
88 def __init__(self, interface, title, message):
89 super().__init__(
90 interface=interface,
91 title=title,
92 message=message,
93 message_type=Gtk.MessageType.ERROR,
94 buttons=Gtk.ButtonsType.CANCEL,
95 )
96
97
98 class StackTraceDialog(MessageDialog):
99 def __init__(self, interface, title, **kwargs):
100 super().__init__(
101 interface=interface,
102 title=title,
103 message_type=Gtk.MessageType.ERROR,
104 buttons=(
105 Gtk.ButtonsType.CANCEL if kwargs.get("retry") else Gtk.ButtonsType.OK
106 ),
107 success_result=Gtk.ResponseType.OK if kwargs.get("retry") else None,
108 **kwargs,
109 )
110
111 def build_dialog(self, message, content, retry):
112 container = self.native.get_message_area()
113
114 self.native.format_secondary_text(message)
115
116 # Create a scrolling readonly text area, in monospace font, to contain the stack trace.
117 buffer = Gtk.TextBuffer()
118 buffer.set_text(content)
119
120 trace = Gtk.TextView()
121 trace.set_buffer(buffer)
122 trace.set_wrap_mode(Gtk.WrapMode.WORD_CHAR)
123 trace.set_property("editable", False)
124 trace.set_property("cursor-visible", False)
125
126 trace.get_style_context().add_class("toga")
127 trace.get_style_context().add_class("stacktrace")
128 trace.get_style_context().add_class("dialog")
129
130 style_provider = Gtk.CssProvider()
131 style_provider.load_from_data(b".toga.stacktrace {font-family: monospace;}")
132
133 trace.get_style_context().add_provider(
134 style_provider,
135 Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION,
136 )
137
138 scroll = Gtk.ScrolledWindow()
139 scroll.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
140 scroll.set_size_request(500, 200)
141 scroll.add(trace)
142
143 container.pack_end(scroll, False, False, 0)
144
145 container.show_all()
146
147 # If this is a retry dialog, add a retry button (which maps to OK).
148 if retry:
149 self.native.add_button("Retry", Gtk.ResponseType.OK)
150
151
152 class FileDialog(BaseDialog):
153 def __init__(
154 self,
155 interface,
156 title,
157 filename,
158 initial_directory,
159 file_types,
160 multiple_select,
161 action,
162 ok_icon,
163 ):
164 super().__init__(interface=interface)
165
166 self.native = Gtk.FileChooserDialog(
167 transient_for=interface.window._impl.native,
168 title=title,
169 action=action,
170 )
171 self.native.add_button(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL)
172 self.native.add_button(ok_icon, Gtk.ResponseType.OK)
173
174 if filename:
175 self.native.set_current_name(filename)
176
177 if initial_directory:
178 self.native.set_current_folder(str(initial_directory))
179
180 if file_types:
181 for file_type in file_types:
182 filter_filetype = Gtk.FileFilter()
183 filter_filetype.set_name("." + file_type + " files")
184 filter_filetype.add_pattern("*." + file_type)
185 self.native.add_filter(filter_filetype)
186
187 self.multiple_select = multiple_select
188 if self.multiple_select:
189 self.native.set_select_multiple(True)
190
191 self.native.connect("response", self.gtk_response)
192 self.native.show()
193
194 # Provided as a stub that can be mocked in test conditions
195 def selected_path(self):
196 return self.native.get_filename()
197
198 # Provided as a stub that can be mocked in test conditions
199 def selected_paths(self):
200 return self.native.get_filenames()
201
202 def gtk_response(self, dialog, response):
203 if response == Gtk.ResponseType.OK:
204 if self.multiple_select:
205 result = [Path(filename) for filename in self.selected_paths()]
206 else:
207 result = Path(self.selected_path())
208 else:
209 result = None
210
211 self.interface.set_result(result)
212
213 self.native.destroy()
214
215
216 class SaveFileDialog(FileDialog):
217 def __init__(
218 self,
219 interface,
220 title,
221 filename,
222 initial_directory,
223 file_types=None,
224 ):
225 super().__init__(
226 interface=interface,
227 title=title,
228 filename=filename,
229 initial_directory=initial_directory,
230 file_types=file_types,
231 multiple_select=False,
232 action=Gtk.FileChooserAction.SAVE,
233 ok_icon=Gtk.STOCK_SAVE,
234 )
235
236
237 class OpenFileDialog(FileDialog):
238 def __init__(
239 self,
240 interface,
241 title,
242 initial_directory,
243 file_types,
244 multiple_select,
245 ):
246 super().__init__(
247 interface=interface,
248 title=title,
249 filename=None,
250 initial_directory=initial_directory,
251 file_types=file_types,
252 multiple_select=multiple_select,
253 action=Gtk.FileChooserAction.OPEN,
254 ok_icon=Gtk.STOCK_OPEN,
255 )
256
257
258 class SelectFolderDialog(FileDialog):
259 def __init__(
260 self,
261 interface,
262 title,
263 initial_directory,
264 multiple_select,
265 ):
266 super().__init__(
267 interface=interface,
268 title=title,
269 filename=None,
270 initial_directory=initial_directory,
271 file_types=None,
272 multiple_select=multiple_select,
273 action=Gtk.FileChooserAction.SELECT_FOLDER,
274 ok_icon=Gtk.STOCK_OPEN,
275 )
276
[end of gtk/src/toga_gtk/dialogs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gtk/src/toga_gtk/dialogs.py b/gtk/src/toga_gtk/dialogs.py
--- a/gtk/src/toga_gtk/dialogs.py
+++ b/gtk/src/toga_gtk/dialogs.py
@@ -30,6 +30,7 @@
buttons=buttons,
text=title,
)
+ self.native.set_modal(True)
self.build_dialog(**kwargs)
self.native.connect("response", self.gtk_response)
|
{"golden_diff": "diff --git a/gtk/src/toga_gtk/dialogs.py b/gtk/src/toga_gtk/dialogs.py\n--- a/gtk/src/toga_gtk/dialogs.py\n+++ b/gtk/src/toga_gtk/dialogs.py\n@@ -30,6 +30,7 @@\n buttons=buttons,\n text=title,\n )\n+ self.native.set_modal(True)\n self.build_dialog(**kwargs)\n \n self.native.connect(\"response\", self.gtk_response)\n", "issue": "Dialog windows are not modal\n### Describe the bug\n\nCreate a dialog like this:\r\n```\r\nasync def on_button_test(widget):\r\n await self.main_window.info_dialog(title=\"Dialog\", message=\"An Info Dialog\")\r\n```\r\nYou can now click on the main window behind the dialog and the main window is activated although the dialog is still floating above the window. This allows the dialog to be shown again or other actions to be triggered although the user should be blocked in the dialog.\r\n\r\nEither change the behavior of dialogs to be modal or add a \"modal={True|False}\" property to \"info_dialog\" (and all similar functions) to enforce this.\n\n### Steps to reproduce\n\n1) Create self.main_window.info_dialog\r\n2) Click main window\r\n3) Main window is activated and can be interacted with. \n\n### Expected behavior\n\nMain window can not be activated (dialog keeps focus)\n\n### Screenshots\n\n_No response_\n\n### Environment\n\nLinux (GenToo). Toga in virtual environment.\n\n### Logs\n\n_No response_\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "from abc import ABC\nfrom pathlib import Path\n\nfrom .libs import Gtk\n\n\nclass BaseDialog(ABC):\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n\n\nclass MessageDialog(BaseDialog):\n def __init__(\n self,\n interface,\n title,\n message_type,\n buttons,\n success_result=None,\n **kwargs,\n ):\n super().__init__(interface=interface)\n self.success_result = success_result\n\n self.native = Gtk.MessageDialog(\n transient_for=interface.window._impl.native,\n flags=0,\n message_type=message_type,\n buttons=buttons,\n text=title,\n )\n self.build_dialog(**kwargs)\n\n self.native.connect(\"response\", self.gtk_response)\n self.native.show()\n\n def build_dialog(self, message):\n self.native.format_secondary_text(message)\n\n def gtk_response(self, dialog, response):\n if self.success_result:\n result = response == self.success_result\n else:\n result = None\n\n self.interface.set_result(result)\n\n self.native.destroy()\n\n\nclass InfoDialog(MessageDialog):\n def __init__(self, interface, title, message):\n super().__init__(\n interface=interface,\n title=title,\n message=message,\n message_type=Gtk.MessageType.INFO,\n buttons=Gtk.ButtonsType.OK,\n )\n\n\nclass QuestionDialog(MessageDialog):\n def __init__(self, interface, title, message):\n super().__init__(\n interface=interface,\n title=title,\n message=message,\n message_type=Gtk.MessageType.QUESTION,\n buttons=Gtk.ButtonsType.YES_NO,\n success_result=Gtk.ResponseType.YES,\n )\n\n\nclass ConfirmDialog(MessageDialog):\n def __init__(self, interface, title, message):\n super().__init__(\n interface=interface,\n title=title,\n message=message,\n message_type=Gtk.MessageType.WARNING,\n buttons=Gtk.ButtonsType.OK_CANCEL,\n success_result=Gtk.ResponseType.OK,\n )\n\n\nclass ErrorDialog(MessageDialog):\n def __init__(self, interface, title, message):\n super().__init__(\n interface=interface,\n title=title,\n message=message,\n message_type=Gtk.MessageType.ERROR,\n buttons=Gtk.ButtonsType.CANCEL,\n )\n\n\nclass StackTraceDialog(MessageDialog):\n def __init__(self, interface, title, 
**kwargs):\n super().__init__(\n interface=interface,\n title=title,\n message_type=Gtk.MessageType.ERROR,\n buttons=(\n Gtk.ButtonsType.CANCEL if kwargs.get(\"retry\") else Gtk.ButtonsType.OK\n ),\n success_result=Gtk.ResponseType.OK if kwargs.get(\"retry\") else None,\n **kwargs,\n )\n\n def build_dialog(self, message, content, retry):\n container = self.native.get_message_area()\n\n self.native.format_secondary_text(message)\n\n # Create a scrolling readonly text area, in monospace font, to contain the stack trace.\n buffer = Gtk.TextBuffer()\n buffer.set_text(content)\n\n trace = Gtk.TextView()\n trace.set_buffer(buffer)\n trace.set_wrap_mode(Gtk.WrapMode.WORD_CHAR)\n trace.set_property(\"editable\", False)\n trace.set_property(\"cursor-visible\", False)\n\n trace.get_style_context().add_class(\"toga\")\n trace.get_style_context().add_class(\"stacktrace\")\n trace.get_style_context().add_class(\"dialog\")\n\n style_provider = Gtk.CssProvider()\n style_provider.load_from_data(b\".toga.stacktrace {font-family: monospace;}\")\n\n trace.get_style_context().add_provider(\n style_provider,\n Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION,\n )\n\n scroll = Gtk.ScrolledWindow()\n scroll.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)\n scroll.set_size_request(500, 200)\n scroll.add(trace)\n\n container.pack_end(scroll, False, False, 0)\n\n container.show_all()\n\n # If this is a retry dialog, add a retry button (which maps to OK).\n if retry:\n self.native.add_button(\"Retry\", Gtk.ResponseType.OK)\n\n\nclass FileDialog(BaseDialog):\n def __init__(\n self,\n interface,\n title,\n filename,\n initial_directory,\n file_types,\n multiple_select,\n action,\n ok_icon,\n ):\n super().__init__(interface=interface)\n\n self.native = Gtk.FileChooserDialog(\n transient_for=interface.window._impl.native,\n title=title,\n action=action,\n )\n self.native.add_button(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL)\n self.native.add_button(ok_icon, Gtk.ResponseType.OK)\n\n if filename:\n self.native.set_current_name(filename)\n\n if initial_directory:\n self.native.set_current_folder(str(initial_directory))\n\n if file_types:\n for file_type in file_types:\n filter_filetype = Gtk.FileFilter()\n filter_filetype.set_name(\".\" + file_type + \" files\")\n filter_filetype.add_pattern(\"*.\" + file_type)\n self.native.add_filter(filter_filetype)\n\n self.multiple_select = multiple_select\n if self.multiple_select:\n self.native.set_select_multiple(True)\n\n self.native.connect(\"response\", self.gtk_response)\n self.native.show()\n\n # Provided as a stub that can be mocked in test conditions\n def selected_path(self):\n return self.native.get_filename()\n\n # Provided as a stub that can be mocked in test conditions\n def selected_paths(self):\n return self.native.get_filenames()\n\n def gtk_response(self, dialog, response):\n if response == Gtk.ResponseType.OK:\n if self.multiple_select:\n result = [Path(filename) for filename in self.selected_paths()]\n else:\n result = Path(self.selected_path())\n else:\n result = None\n\n self.interface.set_result(result)\n\n self.native.destroy()\n\n\nclass SaveFileDialog(FileDialog):\n def __init__(\n self,\n interface,\n title,\n filename,\n initial_directory,\n file_types=None,\n ):\n super().__init__(\n interface=interface,\n title=title,\n filename=filename,\n initial_directory=initial_directory,\n file_types=file_types,\n multiple_select=False,\n action=Gtk.FileChooserAction.SAVE,\n ok_icon=Gtk.STOCK_SAVE,\n )\n\n\nclass OpenFileDialog(FileDialog):\n def __init__(\n 
self,\n interface,\n title,\n initial_directory,\n file_types,\n multiple_select,\n ):\n super().__init__(\n interface=interface,\n title=title,\n filename=None,\n initial_directory=initial_directory,\n file_types=file_types,\n multiple_select=multiple_select,\n action=Gtk.FileChooserAction.OPEN,\n ok_icon=Gtk.STOCK_OPEN,\n )\n\n\nclass SelectFolderDialog(FileDialog):\n def __init__(\n self,\n interface,\n title,\n initial_directory,\n multiple_select,\n ):\n super().__init__(\n interface=interface,\n title=title,\n filename=None,\n initial_directory=initial_directory,\n file_types=None,\n multiple_select=multiple_select,\n action=Gtk.FileChooserAction.SELECT_FOLDER,\n ok_icon=Gtk.STOCK_OPEN,\n )\n", "path": "gtk/src/toga_gtk/dialogs.py"}]}
| 3,016 | 103 |
gh_patches_debug_35082
|
rasdani/github-patches
|
git_diff
|
cookiecutter__cookiecutter-480
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Instructions for local development are incomplete.
When executing the line:
```
python setup.py test
```
as directed, I get the following error:
```
running test
Traceback (most recent call last):
File "setup.py", line 111, in <module>
tests_require=test_requirements
File "/usr/lib/python2.7/distutils/core.py", line 151, in setup
dist.run_commands()
File "/usr/lib/python2.7/distutils/dist.py", line 953, in run_commands
self.run_command(cmd)
File "/usr/lib/python2.7/distutils/dist.py", line 972, in run_command
cmd_obj.run()
File "setup.py", line 60, in run
import pytest
ImportError: No module named pytest
```
I'm not sure what you are supposed to do to install the 'tests_require' dependencies.
</issue>
<code>
[start of cookiecutter/compat.py]
1 import os
2 import sys
3
4 PY3 = sys.version_info[0] == 3
5 OLD_PY2 = sys.version_info[:2] < (2, 7)
6
7
8 if PY3: # pragma: no cover
9 input_str = 'builtins.input'
10 iteritems = lambda d: iter(d.items())
11 from unittest.mock import patch
12 from io import StringIO
13
14
15 else: # pragma: no cover
16 from __builtin__ import raw_input
17 input = raw_input
18 input_str = '__builtin__.raw_input'
19 iteritems = lambda d: d.iteritems()
20 from mock import patch
21 from cStringIO import StringIO
22
23
24 if PY3: # Forced testing
25
26 from shutil import which
27
28 else: # Forced testing
29
30 def is_exe(program):
31 """
32 Returns whether or not a file is an executable.
33 """
34 return os.path.isfile(program) and os.access(program, os.X_OK)
35
36 def which(cmd, mode=os.F_OK | os.X_OK, path=None):
37 """Given a command, mode, and a PATH string, return the path which
38 conforms to the given mode on the PATH, or None if there is no such
39 file.
40 `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
41 of os.environ.get("PATH"), or can be overridden with a custom search
42 path.
43
44 Note: This function was backported from the Python 3 source code.
45 """
46 # Check that a given file can be accessed with the correct mode.
47 # Additionally check that `file` is not a directory, as on Windows
48 # directories pass the os.access check.
49 def _access_check(fn, mode):
50 return (os.path.exists(fn) and os.access(fn, mode) and
51 not os.path.isdir(fn))
52
53 # If we're given a path with a directory part, look it up directly
54 # rather than referring to PATH directories. This includes checking
55 # relative to the current directory, e.g. ./script
56 if os.path.dirname(cmd):
57 if _access_check(cmd, mode):
58 return cmd
59 return None
60
61 if path is None:
62 path = os.environ.get("PATH", os.defpath)
63 if not path:
64 return None
65 path = path.split(os.pathsep)
66
67 if sys.platform == "win32":
68 # The current directory takes precedence on Windows.
69 if os.curdir not in path:
70 path.insert(0, os.curdir)
71
72 # PATHEXT is necessary to check on Windows.
73 pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
74 # See if the given file matches any of the expected path
75 # extensions. This will allow us to short circuit when given
76 # "python.exe". If it does match, only test that one, otherwise we
77 # have to try others.
78 if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
79 files = [cmd]
80 else:
81 files = [cmd + ext for ext in pathext]
82 else:
83 # On other platforms you don't have things like PATHEXT to tell you
84 # what file suffixes are executable, so just pass on cmd as-is.
85 files = [cmd]
86
87 seen = set()
88 for dir in path:
89 normdir = os.path.normcase(dir)
90 if normdir not in seen:
91 seen.add(normdir)
92 for thefile in files:
93 name = os.path.join(dir, thefile)
94 if _access_check(name, mode):
95 return name
96 return None
97
98
99 def is_string(obj):
100 """Determine if an object is a string."""
101 return isinstance(obj, str if PY3 else basestring)
102
103
104 _hush_pyflakes = (patch, StringIO, which)
105
[end of cookiecutter/compat.py]
[start of setup.py]
1 #!/usr/bin/env python
2
3 import os
4 import sys
5
6 try:
7 from setuptools import setup, Command
8 except ImportError:
9 from distutils.core import setup, Command
10
11 version = "1.0.0"
12
13 if sys.argv[-1] == 'publish':
14 os.system('python setup.py sdist upload')
15 os.system('python setup.py bdist_wheel upload')
16 sys.exit()
17
18 if sys.argv[-1] == 'tag':
19 os.system("git tag -a %s -m 'version %s'" % (version, version))
20 os.system("git push --tags")
21 sys.exit()
22
23 with open('README.rst') as readme_file:
24 readme = readme_file.read()
25
26 with open('HISTORY.rst') as history_file:
27 history = history_file.read().replace('.. :changelog:', '')
28
29 requirements = [
30 'binaryornot>=0.2.0',
31 'jinja2>=2.7',
32 'PyYAML>=3.10',
33 'click<5.0'
34 ]
35
36 test_requirements = [
37 'pytest'
38 ]
39
40 # Add Python 2.7-specific dependencies
41 if sys.version < '3':
42 requirements.append('mock')
43
44 # There are no Python 3-specific dependencies to add
45
46 long_description = readme + '\n\n' + history
47
48 if sys.argv[-1] == 'readme':
49 print(long_description)
50 sys.exit()
51
52
53 class PyTest(Command):
54 user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
55
56 def initialize_options(self):
57 self.pytest_args = []
58
59 def finalize_options(self):
60 pass
61
62 def run(self):
63 import pytest
64 errno = pytest.main(self.pytest_args)
65 sys.exit(errno)
66
67
68 setup(
69 name='cookiecutter',
70 version=version,
71 description=('A command-line utility that creates projects from project '
72 'templates, e.g. creating a Python package project from a '
73 'Python package project template.'),
74 long_description=long_description,
75 author='Audrey Roy',
76 author_email='[email protected]',
77 url='https://github.com/audreyr/cookiecutter',
78 packages=[
79 'cookiecutter',
80 ],
81 package_dir={'cookiecutter': 'cookiecutter'},
82 entry_points={
83 'console_scripts': [
84 'cookiecutter = cookiecutter.cli:main',
85 ]
86 },
87 include_package_data=True,
88 install_requires=requirements,
89 license='BSD',
90 zip_safe=False,
91 classifiers=[
92 'Development Status :: 3 - Alpha',
93 'Environment :: Console',
94 'Intended Audience :: Developers',
95 'Natural Language :: English',
96 'License :: OSI Approved :: BSD License',
97 'Programming Language :: Python',
98 'Programming Language :: Python :: 2',
99 'Programming Language :: Python :: 2.7',
100 'Programming Language :: Python :: 3',
101 'Programming Language :: Python :: 3.3',
102 'Programming Language :: Python :: 3.4',
103 'Programming Language :: Python :: Implementation :: CPython',
104 'Programming Language :: Python :: Implementation :: PyPy',
105 'Topic :: Software Development',
106 ],
107 keywords=(
108 'cookiecutter, Python, projects, project templates, Jinja2, '
109 'skeleton, scaffolding, project directory, setup.py, package, '
110 'packaging'
111 ),
112 cmdclass = {'test': PyTest},
113 test_suite='tests',
114 tests_require=test_requirements
115 )
116
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cookiecutter/compat.py b/cookiecutter/compat.py
--- a/cookiecutter/compat.py
+++ b/cookiecutter/compat.py
@@ -8,7 +8,6 @@
if PY3: # pragma: no cover
input_str = 'builtins.input'
iteritems = lambda d: iter(d.items())
- from unittest.mock import patch
from io import StringIO
@@ -17,7 +16,6 @@
input = raw_input
input_str = '__builtin__.raw_input'
iteritems = lambda d: d.iteritems()
- from mock import patch
from cStringIO import StringIO
@@ -101,4 +99,4 @@
return isinstance(obj, str if PY3 else basestring)
-_hush_pyflakes = (patch, StringIO, which)
+_hush_pyflakes = (StringIO, which)
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,9 +4,9 @@
import sys
try:
- from setuptools import setup, Command
+ from setuptools import setup
except ImportError:
- from distutils.core import setup, Command
+ from distutils.core import setup
version = "1.0.0"
@@ -33,16 +33,6 @@
'click<5.0'
]
-test_requirements = [
- 'pytest'
-]
-
-# Add Python 2.7-specific dependencies
-if sys.version < '3':
- requirements.append('mock')
-
-# There are no Python 3-specific dependencies to add
-
long_description = readme + '\n\n' + history
if sys.argv[-1] == 'readme':
@@ -50,21 +40,6 @@
sys.exit()
-class PyTest(Command):
- user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
-
- def initialize_options(self):
- self.pytest_args = []
-
- def finalize_options(self):
- pass
-
- def run(self):
- import pytest
- errno = pytest.main(self.pytest_args)
- sys.exit(errno)
-
-
setup(
name='cookiecutter',
version=version,
@@ -109,7 +84,4 @@
'skeleton, scaffolding, project directory, setup.py, package, '
'packaging'
),
- cmdclass = {'test': PyTest},
- test_suite='tests',
- tests_require=test_requirements
)
|
{"golden_diff": "diff --git a/cookiecutter/compat.py b/cookiecutter/compat.py\n--- a/cookiecutter/compat.py\n+++ b/cookiecutter/compat.py\n@@ -8,7 +8,6 @@\n if PY3: # pragma: no cover\n input_str = 'builtins.input'\n iteritems = lambda d: iter(d.items())\n- from unittest.mock import patch\n from io import StringIO\n \n \n@@ -17,7 +16,6 @@\n input = raw_input\n input_str = '__builtin__.raw_input'\n iteritems = lambda d: d.iteritems()\n- from mock import patch\n from cStringIO import StringIO\n \n \n@@ -101,4 +99,4 @@\n return isinstance(obj, str if PY3 else basestring)\n \n \n-_hush_pyflakes = (patch, StringIO, which)\n+_hush_pyflakes = (StringIO, which)\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,9 +4,9 @@\n import sys\n \n try:\n- from setuptools import setup, Command\n+ from setuptools import setup\n except ImportError:\n- from distutils.core import setup, Command\n+ from distutils.core import setup\n \n version = \"1.0.0\"\n \n@@ -33,16 +33,6 @@\n 'click<5.0'\n ]\n \n-test_requirements = [\n- 'pytest'\n-]\n-\n-# Add Python 2.7-specific dependencies\n-if sys.version < '3':\n- requirements.append('mock')\n-\n-# There are no Python 3-specific dependencies to add\n-\n long_description = readme + '\\n\\n' + history\n \n if sys.argv[-1] == 'readme':\n@@ -50,21 +40,6 @@\n sys.exit()\n \n \n-class PyTest(Command):\n- user_options = [('pytest-args=', 'a', \"Arguments to pass to py.test\")]\n-\n- def initialize_options(self):\n- self.pytest_args = []\n-\n- def finalize_options(self):\n- pass\n-\n- def run(self):\n- import pytest\n- errno = pytest.main(self.pytest_args)\n- sys.exit(errno)\n-\n-\n setup(\n name='cookiecutter',\n version=version,\n@@ -109,7 +84,4 @@\n 'skeleton, scaffolding, project directory, setup.py, package, '\n 'packaging'\n ),\n- cmdclass = {'test': PyTest},\n- test_suite='tests',\n- tests_require=test_requirements\n )\n", "issue": "Instructions for local development are incomplete.\nWhen executing the line:\n\n```\npython setup.py test\n```\n\nas directed, I get the following error:\n\n```\nrunning test\nTraceback (most recent call last):\n File \"setup.py\", line 111, in <module>\n tests_require=test_requirements\n File \"/usr/lib/python2.7/distutils/core.py\", line 151, in setup\n dist.run_commands()\n File \"/usr/lib/python2.7/distutils/dist.py\", line 953, in run_commands\n self.run_command(cmd)\n File \"/usr/lib/python2.7/distutils/dist.py\", line 972, in run_command\n cmd_obj.run()\n File \"setup.py\", line 60, in run\n import pytest\nImportError: No module named pytest\n```\n\nI'm not sure what you are supposed to do to install the 'tests_require' dependencies.\n\n", "before_files": [{"content": "import os\nimport sys\n\nPY3 = sys.version_info[0] == 3\nOLD_PY2 = sys.version_info[:2] < (2, 7)\n\n\nif PY3: # pragma: no cover\n input_str = 'builtins.input'\n iteritems = lambda d: iter(d.items())\n from unittest.mock import patch\n from io import StringIO\n\n\nelse: # pragma: no cover\n from __builtin__ import raw_input\n input = raw_input\n input_str = '__builtin__.raw_input'\n iteritems = lambda d: d.iteritems()\n from mock import patch\n from cStringIO import StringIO\n\n\nif PY3: # Forced testing\n\n from shutil import which\n\nelse: # Forced testing\n\n def is_exe(program):\n \"\"\"\n Returns whether or not a file is an executable.\n \"\"\"\n return os.path.isfile(program) and os.access(program, os.X_OK)\n\n def which(cmd, mode=os.F_OK | os.X_OK, path=None):\n \"\"\"Given a command, mode, and a PATH string, return the path which\n conforms 
to the given mode on the PATH, or None if there is no such\n file.\n `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result\n of os.environ.get(\"PATH\"), or can be overridden with a custom search\n path.\n\n Note: This function was backported from the Python 3 source code.\n \"\"\"\n # Check that a given file can be accessed with the correct mode.\n # Additionally check that `file` is not a directory, as on Windows\n # directories pass the os.access check.\n def _access_check(fn, mode):\n return (os.path.exists(fn) and os.access(fn, mode) and\n not os.path.isdir(fn))\n\n # If we're given a path with a directory part, look it up directly\n # rather than referring to PATH directories. This includes checking\n # relative to the current directory, e.g. ./script\n if os.path.dirname(cmd):\n if _access_check(cmd, mode):\n return cmd\n return None\n\n if path is None:\n path = os.environ.get(\"PATH\", os.defpath)\n if not path:\n return None\n path = path.split(os.pathsep)\n\n if sys.platform == \"win32\":\n # The current directory takes precedence on Windows.\n if os.curdir not in path:\n path.insert(0, os.curdir)\n\n # PATHEXT is necessary to check on Windows.\n pathext = os.environ.get(\"PATHEXT\", \"\").split(os.pathsep)\n # See if the given file matches any of the expected path\n # extensions. This will allow us to short circuit when given\n # \"python.exe\". If it does match, only test that one, otherwise we\n # have to try others.\n if any(cmd.lower().endswith(ext.lower()) for ext in pathext):\n files = [cmd]\n else:\n files = [cmd + ext for ext in pathext]\n else:\n # On other platforms you don't have things like PATHEXT to tell you\n # what file suffixes are executable, so just pass on cmd as-is.\n files = [cmd]\n\n seen = set()\n for dir in path:\n normdir = os.path.normcase(dir)\n if normdir not in seen:\n seen.add(normdir)\n for thefile in files:\n name = os.path.join(dir, thefile)\n if _access_check(name, mode):\n return name\n return None\n\n\ndef is_string(obj):\n \"\"\"Determine if an object is a string.\"\"\"\n return isinstance(obj, str if PY3 else basestring)\n\n\n_hush_pyflakes = (patch, StringIO, which)\n", "path": "cookiecutter/compat.py"}, {"content": "#!/usr/bin/env python\n\nimport os\nimport sys\n\ntry:\n from setuptools import setup, Command\nexcept ImportError:\n from distutils.core import setup, Command\n\nversion = \"1.0.0\"\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n os.system('python setup.py bdist_wheel upload')\n sys.exit()\n\nif sys.argv[-1] == 'tag':\n os.system(\"git tag -a %s -m 'version %s'\" % (version, version))\n os.system(\"git push --tags\")\n sys.exit()\n\nwith open('README.rst') as readme_file:\n readme = readme_file.read()\n\nwith open('HISTORY.rst') as history_file:\n history = history_file.read().replace('.. 
:changelog:', '')\n\nrequirements = [\n 'binaryornot>=0.2.0',\n 'jinja2>=2.7',\n 'PyYAML>=3.10',\n 'click<5.0'\n]\n\ntest_requirements = [\n 'pytest'\n]\n\n# Add Python 2.7-specific dependencies\nif sys.version < '3':\n requirements.append('mock')\n\n# There are no Python 3-specific dependencies to add\n\nlong_description = readme + '\\n\\n' + history\n\nif sys.argv[-1] == 'readme':\n print(long_description)\n sys.exit()\n\n\nclass PyTest(Command):\n user_options = [('pytest-args=', 'a', \"Arguments to pass to py.test\")]\n\n def initialize_options(self):\n self.pytest_args = []\n\n def finalize_options(self):\n pass\n\n def run(self):\n import pytest\n errno = pytest.main(self.pytest_args)\n sys.exit(errno)\n\n\nsetup(\n name='cookiecutter',\n version=version,\n description=('A command-line utility that creates projects from project '\n 'templates, e.g. creating a Python package project from a '\n 'Python package project template.'),\n long_description=long_description,\n author='Audrey Roy',\n author_email='[email protected]',\n url='https://github.com/audreyr/cookiecutter',\n packages=[\n 'cookiecutter',\n ],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={\n 'console_scripts': [\n 'cookiecutter = cookiecutter.cli:main',\n ]\n },\n include_package_data=True,\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development',\n ],\n keywords=(\n 'cookiecutter, Python, projects, project templates, Jinja2, '\n 'skeleton, scaffolding, project directory, setup.py, package, '\n 'packaging'\n ),\n cmdclass = {'test': PyTest},\n test_suite='tests',\n tests_require=test_requirements\n)\n", "path": "setup.py"}]}
| 2,782 | 566 |
gh_patches_debug_41513
|
rasdani/github-patches
|
git_diff
|
e-valuation__EvaP-663
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Reward point redemption error message
When users try to redeem 0 reward points they get the error message "You don't have enough reward points". The error message should be something like "You must select the number of reward points you want to redeem" instead.
</issue>
<code>
[start of evap/rewards/tools.py]
1 from django.conf import settings
2 from django.contrib import messages
3 from django.db import transaction
4 from django.utils.translation import ugettext as _
5 from django.dispatch import receiver
6
7 from django.contrib.auth.decorators import login_required
8 from evap.evaluation.models import Course
9
10 from evap.rewards.models import RewardPointGranting, RewardPointRedemption, RewardPointRedemptionEvent, SemesterActivation
11
12 @login_required
13 @transaction.atomic
14 def save_redemptions(request, redemptions):
15 total_points_available = reward_points_of_user(request.user)
16 total_points_redeemed = sum(redemptions.values())
17
18 if total_points_redeemed == 0 or total_points_redeemed > total_points_available:
19 return False
20
21 for event_id in redemptions:
22 if redemptions[event_id] > 0:
23 redemption = RewardPointRedemption(
24 user_profile=request.user,
25 value=redemptions[event_id],
26 event=RewardPointRedemptionEvent.objects.get(id=event_id)
27 )
28 redemption.save()
29 return True
30
31
32 def can_user_use_reward_points(user):
33 return not user.is_external and user.is_participant
34
35
36 def reward_points_of_user(user):
37 reward_point_grantings = RewardPointGranting.objects.filter(user_profile=user)
38 reward_point_redemptions = RewardPointRedemption.objects.filter(user_profile=user)
39
40 count = 0
41 for granting in reward_point_grantings:
42 count += granting.value
43 for redemption in reward_point_redemptions:
44 count -= redemption.value
45
46 return count
47
48
49 @receiver(Course.course_evaluated)
50 def grant_reward_points(sender, **kwargs):
51 # grant reward points if all conditions are fulfilled
52
53 request = kwargs['request']
54 semester = kwargs['semester']
55 if not can_user_use_reward_points(request.user):
56 return
57 # has the semester been activated for reward points?
58 if not is_semester_activated(semester):
59 return
60 # does the user have at least one required course in this semester?
61 required_courses = Course.objects.filter(participants=request.user, semester=semester, is_required_for_reward=True)
62 if not required_courses.exists():
63 return
64 # does the user not participate in any more required courses in this semester?
65 if required_courses.exclude(voters=request.user).exists():
66 return
67 # did the user not already get reward points for this semester?
68 if not RewardPointGranting.objects.filter(user_profile=request.user, semester=semester):
69 granting = RewardPointGranting(user_profile=request.user, semester=semester, value=settings.REWARD_POINTS_PER_SEMESTER)
70 granting.save()
71 messages.success(request, _("You just have earned reward points for this semester because you evaluated all your courses. Thank you very much!"))
72
73
74 def is_semester_activated(semester):
75 try:
76 activation = SemesterActivation.objects.get(semester=semester)
77 return activation.is_active
78 except SemesterActivation.DoesNotExist:
79 return False
80
[end of evap/rewards/tools.py]
[start of evap/rewards/views.py]
1 from django.contrib import messages
2 from django.shortcuts import get_object_or_404, redirect, render
3 from django.utils.translation import ugettext as _
4 from django.utils.translation import get_language
5 from django.http import HttpResponse
6 from datetime import datetime
7 from operator import attrgetter
8
9 from evap.evaluation.auth import reward_user_required, staff_required
10 from evap.evaluation.models import Semester, Course
11
12 from evap.staff.views import semester_view
13
14 from evap.rewards.models import RewardPointGranting, RewardPointRedemption, RewardPointRedemptionEvent, SemesterActivation
15 from evap.rewards.tools import save_redemptions, reward_points_of_user, can_user_use_reward_points
16 from evap.rewards.forms import RewardPointRedemptionEventForm
17 from evap.rewards.exporters import ExcelExporter
18
19 @reward_user_required
20 def index(request):
21 if request.method == 'POST':
22 redemptions = {}
23 for key, value in request.POST.items():
24 if(key.startswith('points-')):
25 event_id = int(key.rpartition('-')[2])
26 redemptions[event_id] = int(value)
27
28 if save_redemptions(request, redemptions):
29 messages.success(request, _("You successfully redeemed your points."))
30 else:
31 messages.warning(request, _("You don't have enough reward points."))
32
33 total_points_available = reward_points_of_user(request.user)
34 reward_point_grantings = RewardPointGranting.objects.filter(user_profile=request.user)
35 reward_point_redemptions = RewardPointRedemption.objects.filter(user_profile=request.user)
36 events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__gte=datetime.now())
37 events = sorted(events, key=lambda event: event.date)
38
39 reward_point_actions=[]
40 for granting in reward_point_grantings:
41 reward_point_actions.append((granting.granting_time, _('Reward for') + ' ' + granting.semester.name, granting.value, ''))
42 for redemption in reward_point_redemptions:
43 reward_point_actions.append((redemption.redemption_time, redemption.event.name, '', redemption.value))
44
45 reward_point_actions.sort(key=lambda action: action[0], reverse=True)
46
47 template_data = dict(
48 reward_point_actions=reward_point_actions,
49 total_points_available=total_points_available,
50 events=events,
51 point_selection=[x for x in range(0,total_points_available+1)])
52 return render(request, "rewards_index.html", template_data)
53
54
55 @staff_required
56 def semester_reward_points(request, semester_id):
57 semester = get_object_or_404(Semester, id=semester_id)
58 courses = Course.objects.filter(semester=semester)
59 participants = set()
60 for course in courses:
61 for participant in course.participants.all():
62 if can_user_use_reward_points(participant):
63 participants.add(participant)
64 participants = sorted(participants, key=attrgetter('last_name', 'first_name'))
65
66 data = []
67 for participant in participants:
68 number_of_required_courses = Course.objects.filter(semester=semester, participants=participant, is_required_for_reward=True).count()
69 number_of_required_courses_voted_for = Course.objects.filter(semester=semester, voters=participant, is_required_for_reward=True).count()
70 number_of_optional_courses = Course.objects.filter(semester=semester, participants=participant, is_required_for_reward=False).count()
71 number_of_optional_courses_voted_for = Course.objects.filter(semester=semester, voters=participant, is_required_for_reward=False).count()
72 earned_reward_points = RewardPointGranting.objects.filter(semester=semester, user_profile=participant).exists()
73 data.append((participant, number_of_required_courses_voted_for, number_of_required_courses,
74 number_of_optional_courses_voted_for, number_of_optional_courses, earned_reward_points))
75
76 template_data = dict(semester=semester, data=data, disable_breadcrumb_semester=False)
77 return render(request, "rewards_semester_reward_points_view.html", template_data)
78
79
80 @staff_required
81 def reward_point_redemption_events(request):
82 upcoming_events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__gte=datetime.now()).order_by('date')
83 past_events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__lt=datetime.now()).order_by('-date')
84 template_data = dict(upcoming_events=upcoming_events, past_events=past_events)
85 return render(request, "rewards_reward_point_redemption_events.html", template_data)
86
87
88 @staff_required
89 def reward_point_redemption_event_create(request):
90 event = RewardPointRedemptionEvent()
91 form = RewardPointRedemptionEventForm(request.POST or None, instance=event)
92
93 if form.is_valid():
94 form.save()
95 messages.success(request, _("Successfully created event."))
96 return redirect('rewards:reward_point_redemption_events')
97 else:
98 return render(request, "rewards_reward_point_redemption_event_form.html", dict(form=form))
99
100
101 @staff_required
102 def reward_point_redemption_event_edit(request, event_id):
103 event = get_object_or_404(RewardPointRedemptionEvent, id=event_id)
104 form = RewardPointRedemptionEventForm(request.POST or None, instance=event)
105
106 if form.is_valid():
107 event = form.save()
108
109 messages.success(request, _("Successfully updated event."))
110 return redirect('rewards:reward_point_redemption_events')
111 else:
112 return render(request, "rewards_reward_point_redemption_event_form.html", dict(event=event, form=form))
113
114
115 @staff_required
116 def reward_point_redemption_event_delete(request, event_id):
117 event = get_object_or_404(RewardPointRedemptionEvent, id=event_id)
118
119 if event.can_delete:
120 if request.method == 'POST':
121 event.delete()
122 return redirect('rewards:reward_point_redemption_events')
123 else:
124 return render(request, "rewards_reward_point_redemption_event_delete.html", dict(event=event))
125 else:
126 messages.warning(request, _("This event cannot be deleted because some users already redeemed points for it."))
127 return redirect('rewards:reward_point_redemption_events')
128
129
130 @staff_required
131 def reward_point_redemption_event_export(request, event_id):
132 event = get_object_or_404(RewardPointRedemptionEvent, id=event_id)
133
134 filename = _("RewardPoints")+"-%s-%s-%s.xls" % (event.date, event.name, get_language())
135
136 response = HttpResponse(content_type="application/vnd.ms-excel")
137 response["Content-Disposition"] = "attachment; filename=\"%s\"" % filename
138
139 ExcelExporter(event.redemptions_by_user()).export(response)
140
141 return response
142
143
144 @staff_required
145 def semester_activation(request, semester_id, active):
146 if active == 'on':
147 active = True
148 else:
149 active = False
150
151 try:
152 activation = SemesterActivation.objects.filter(semester=Semester.objects.get(id=semester_id)).get()
153 activation.is_active = active
154 except SemesterActivation.DoesNotExist:
155 activation = SemesterActivation(semester=Semester.objects.get(id=semester_id), is_active=active)
156 activation.save()
157
158 return semester_view(request=request, semester_id=semester_id)
159
[end of evap/rewards/views.py]
[start of evap/rewards/models.py]
1 from django.utils.translation import ugettext_lazy as _
2 from django.db import models
3
4 from collections import OrderedDict
5
6
7 class RewardPointRedemptionEvent(models.Model):
8 name = models.CharField(max_length=1024, verbose_name=_("event name"))
9 date = models.DateField(verbose_name=_("event date"))
10 redeem_end_date = models.DateField(verbose_name=_("redemption end date"))
11
12 @property
13 def can_delete(self):
14 if RewardPointRedemption.objects.filter(event=self).exists():
15 return False
16 return True
17
18 def redemptions_by_user(self):
19 redemptions = self.reward_point_redemptions.order_by('user_profile')
20 redemptions_dict = OrderedDict()
21 for redemption in redemptions:
22 if not redemption.user_profile in redemptions_dict:
23 redemptions_dict[redemption.user_profile] = 0
24 redemptions_dict[redemption.user_profile] += redemption.value
25 return redemptions_dict
26
27 class RewardPointGranting(models.Model):
28 user_profile = models.ForeignKey('evaluation.UserProfile', related_name="reward_point_grantings")
29 semester = models.ForeignKey('evaluation.Semester', related_name="reward_point_grantings", blank=True, null=True)
30 granting_time = models.DateTimeField(verbose_name=_("granting time"), auto_now_add=True)
31 value = models.IntegerField(verbose_name=_("value"), default=0)
32
33 class RewardPointRedemption(models.Model):
34 user_profile = models.ForeignKey('evaluation.UserProfile', related_name="reward_point_redemptions")
35 redemption_time = models.DateTimeField(verbose_name=_("redemption time"), auto_now_add=True)
36 value = models.IntegerField(verbose_name=_("value"), default=0)
37 event = models.ForeignKey(RewardPointRedemptionEvent, related_name="reward_point_redemptions")
38
39 class SemesterActivation(models.Model):
40 semester = models.OneToOneField('evaluation.Semester', related_name='rewards_active')
41 is_active = models.BooleanField(default=False)
42
[end of evap/rewards/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/evap/rewards/models.py b/evap/rewards/models.py
--- a/evap/rewards/models.py
+++ b/evap/rewards/models.py
@@ -3,6 +3,13 @@
from collections import OrderedDict
+class NoPointsSelected(Exception):
+ """An attempt has been made to redeem <= 0 points."""
+ pass
+
+class NotEnoughPoints(Exception):
+ """An attempt has been made to redeem more points than available."""
+ pass
class RewardPointRedemptionEvent(models.Model):
name = models.CharField(max_length=1024, verbose_name=_("event name"))
diff --git a/evap/rewards/tools.py b/evap/rewards/tools.py
--- a/evap/rewards/tools.py
+++ b/evap/rewards/tools.py
@@ -7,7 +7,7 @@
from django.contrib.auth.decorators import login_required
from evap.evaluation.models import Course
-from evap.rewards.models import RewardPointGranting, RewardPointRedemption, RewardPointRedemptionEvent, SemesterActivation
+from evap.rewards.models import RewardPointGranting, RewardPointRedemption, RewardPointRedemptionEvent, SemesterActivation, NoPointsSelected, NotEnoughPoints
@login_required
@transaction.atomic
@@ -15,8 +15,11 @@
total_points_available = reward_points_of_user(request.user)
total_points_redeemed = sum(redemptions.values())
- if total_points_redeemed == 0 or total_points_redeemed > total_points_available:
- return False
+ if total_points_redeemed <= 0:
+ raise NoPointsSelected(_("You cannot redeem 0 points."))
+
+ if total_points_redeemed > total_points_available:
+ raise NotEnoughPoints(_("You don't have enough reward points."))
for event_id in redemptions:
if redemptions[event_id] > 0:
@@ -26,8 +29,6 @@
event=RewardPointRedemptionEvent.objects.get(id=event_id)
)
redemption.save()
- return True
-
def can_user_use_reward_points(user):
return not user.is_external and user.is_participant
diff --git a/evap/rewards/views.py b/evap/rewards/views.py
--- a/evap/rewards/views.py
+++ b/evap/rewards/views.py
@@ -11,7 +11,7 @@
from evap.staff.views import semester_view
-from evap.rewards.models import RewardPointGranting, RewardPointRedemption, RewardPointRedemptionEvent, SemesterActivation
+from evap.rewards.models import RewardPointGranting, RewardPointRedemption, RewardPointRedemptionEvent, SemesterActivation, NoPointsSelected, NotEnoughPoints
from evap.rewards.tools import save_redemptions, reward_points_of_user, can_user_use_reward_points
from evap.rewards.forms import RewardPointRedemptionEventForm
from evap.rewards.exporters import ExcelExporter
@@ -25,10 +25,11 @@
event_id = int(key.rpartition('-')[2])
redemptions[event_id] = int(value)
- if save_redemptions(request, redemptions):
+ try:
+ save_redemptions(request, redemptions)
messages.success(request, _("You successfully redeemed your points."))
- else:
- messages.warning(request, _("You don't have enough reward points."))
+ except (NoPointsSelected, NotEnoughPoints) as error:
+ messages.warning(request, error)
total_points_available = reward_points_of_user(request.user)
reward_point_grantings = RewardPointGranting.objects.filter(user_profile=request.user)
|
{"golden_diff": "diff --git a/evap/rewards/models.py b/evap/rewards/models.py\n--- a/evap/rewards/models.py\n+++ b/evap/rewards/models.py\n@@ -3,6 +3,13 @@\n \n from collections import OrderedDict\n \n+class NoPointsSelected(Exception):\n+ \"\"\"An attempt has been made to redeem <= 0 points.\"\"\"\n+ pass\n+\n+class NotEnoughPoints(Exception):\n+ \"\"\"An attempt has been made to redeem more points than available.\"\"\"\n+ pass\n \n class RewardPointRedemptionEvent(models.Model):\n name = models.CharField(max_length=1024, verbose_name=_(\"event name\"))\ndiff --git a/evap/rewards/tools.py b/evap/rewards/tools.py\n--- a/evap/rewards/tools.py\n+++ b/evap/rewards/tools.py\n@@ -7,7 +7,7 @@\n from django.contrib.auth.decorators import login_required\n from evap.evaluation.models import Course\n \n-from evap.rewards.models import RewardPointGranting, RewardPointRedemption, RewardPointRedemptionEvent, SemesterActivation\n+from evap.rewards.models import RewardPointGranting, RewardPointRedemption, RewardPointRedemptionEvent, SemesterActivation, NoPointsSelected, NotEnoughPoints\n \n @login_required\n @transaction.atomic\n@@ -15,8 +15,11 @@\n total_points_available = reward_points_of_user(request.user)\n total_points_redeemed = sum(redemptions.values())\n \n- if total_points_redeemed == 0 or total_points_redeemed > total_points_available:\n- return False\n+ if total_points_redeemed <= 0:\n+ raise NoPointsSelected(_(\"You cannot redeem 0 points.\"))\n+\n+ if total_points_redeemed > total_points_available:\n+ raise NotEnoughPoints(_(\"You don't have enough reward points.\"))\n \n for event_id in redemptions:\n if redemptions[event_id] > 0:\n@@ -26,8 +29,6 @@\n event=RewardPointRedemptionEvent.objects.get(id=event_id)\n )\n redemption.save()\n- return True\n-\n \n def can_user_use_reward_points(user):\n return not user.is_external and user.is_participant\ndiff --git a/evap/rewards/views.py b/evap/rewards/views.py\n--- a/evap/rewards/views.py\n+++ b/evap/rewards/views.py\n@@ -11,7 +11,7 @@\n \n from evap.staff.views import semester_view\n \n-from evap.rewards.models import RewardPointGranting, RewardPointRedemption, RewardPointRedemptionEvent, SemesterActivation\n+from evap.rewards.models import RewardPointGranting, RewardPointRedemption, RewardPointRedemptionEvent, SemesterActivation, NoPointsSelected, NotEnoughPoints\n from evap.rewards.tools import save_redemptions, reward_points_of_user, can_user_use_reward_points\n from evap.rewards.forms import RewardPointRedemptionEventForm\n from evap.rewards.exporters import ExcelExporter\n@@ -25,10 +25,11 @@\n event_id = int(key.rpartition('-')[2])\n redemptions[event_id] = int(value)\n \n- if save_redemptions(request, redemptions):\n+ try:\n+ save_redemptions(request, redemptions)\n messages.success(request, _(\"You successfully redeemed your points.\"))\n- else:\n- messages.warning(request, _(\"You don't have enough reward points.\"))\n+ except (NoPointsSelected, NotEnoughPoints) as error:\n+ messages.warning(request, error)\n \n total_points_available = reward_points_of_user(request.user)\n reward_point_grantings = RewardPointGranting.objects.filter(user_profile=request.user)\n", "issue": "Reward point redemption error message\nWhen users try to redeem 0 reward points they get the error message \"You don't have enough reward points\". 
The error message should be something like \"You must select the number of reward points you want to redeem\" instead.\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib import messages\nfrom django.db import transaction\nfrom django.utils.translation import ugettext as _\nfrom django.dispatch import receiver\n\nfrom django.contrib.auth.decorators import login_required\nfrom evap.evaluation.models import Course\n\nfrom evap.rewards.models import RewardPointGranting, RewardPointRedemption, RewardPointRedemptionEvent, SemesterActivation\n\n@login_required\[email protected]\ndef save_redemptions(request, redemptions):\n total_points_available = reward_points_of_user(request.user)\n total_points_redeemed = sum(redemptions.values())\n\n if total_points_redeemed == 0 or total_points_redeemed > total_points_available:\n return False\n\n for event_id in redemptions:\n if redemptions[event_id] > 0:\n redemption = RewardPointRedemption(\n user_profile=request.user,\n value=redemptions[event_id],\n event=RewardPointRedemptionEvent.objects.get(id=event_id)\n )\n redemption.save()\n return True\n\n\ndef can_user_use_reward_points(user):\n return not user.is_external and user.is_participant\n\n\ndef reward_points_of_user(user):\n reward_point_grantings = RewardPointGranting.objects.filter(user_profile=user)\n reward_point_redemptions = RewardPointRedemption.objects.filter(user_profile=user)\n\n count = 0\n for granting in reward_point_grantings:\n count += granting.value\n for redemption in reward_point_redemptions:\n count -= redemption.value\n\n return count\n\n\n@receiver(Course.course_evaluated)\ndef grant_reward_points(sender, **kwargs):\n # grant reward points if all conditions are fulfilled\n\n request = kwargs['request']\n semester = kwargs['semester']\n if not can_user_use_reward_points(request.user):\n return\n # has the semester been activated for reward points?\n if not is_semester_activated(semester):\n return\n # does the user have at least one required course in this semester?\n required_courses = Course.objects.filter(participants=request.user, semester=semester, is_required_for_reward=True)\n if not required_courses.exists():\n return\n # does the user not participate in any more required courses in this semester?\n if required_courses.exclude(voters=request.user).exists():\n return\n # did the user not already get reward points for this semester?\n if not RewardPointGranting.objects.filter(user_profile=request.user, semester=semester):\n granting = RewardPointGranting(user_profile=request.user, semester=semester, value=settings.REWARD_POINTS_PER_SEMESTER)\n granting.save()\n messages.success(request, _(\"You just have earned reward points for this semester because you evaluated all your courses. 
Thank you very much!\"))\n\n\ndef is_semester_activated(semester):\n try:\n activation = SemesterActivation.objects.get(semester=semester)\n return activation.is_active\n except SemesterActivation.DoesNotExist:\n return False\n", "path": "evap/rewards/tools.py"}, {"content": "from django.contrib import messages\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import ugettext as _\nfrom django.utils.translation import get_language\nfrom django.http import HttpResponse\nfrom datetime import datetime\nfrom operator import attrgetter\n\nfrom evap.evaluation.auth import reward_user_required, staff_required\nfrom evap.evaluation.models import Semester, Course\n\nfrom evap.staff.views import semester_view\n\nfrom evap.rewards.models import RewardPointGranting, RewardPointRedemption, RewardPointRedemptionEvent, SemesterActivation\nfrom evap.rewards.tools import save_redemptions, reward_points_of_user, can_user_use_reward_points\nfrom evap.rewards.forms import RewardPointRedemptionEventForm\nfrom evap.rewards.exporters import ExcelExporter\n\n@reward_user_required\ndef index(request):\n if request.method == 'POST':\n redemptions = {}\n for key, value in request.POST.items():\n if(key.startswith('points-')):\n event_id = int(key.rpartition('-')[2])\n redemptions[event_id] = int(value)\n\n if save_redemptions(request, redemptions):\n messages.success(request, _(\"You successfully redeemed your points.\"))\n else:\n messages.warning(request, _(\"You don't have enough reward points.\"))\n\n total_points_available = reward_points_of_user(request.user)\n reward_point_grantings = RewardPointGranting.objects.filter(user_profile=request.user)\n reward_point_redemptions = RewardPointRedemption.objects.filter(user_profile=request.user)\n events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__gte=datetime.now())\n events = sorted(events, key=lambda event: event.date)\n\n reward_point_actions=[]\n for granting in reward_point_grantings:\n reward_point_actions.append((granting.granting_time, _('Reward for') + ' ' + granting.semester.name, granting.value, ''))\n for redemption in reward_point_redemptions:\n reward_point_actions.append((redemption.redemption_time, redemption.event.name, '', redemption.value))\n\n reward_point_actions.sort(key=lambda action: action[0], reverse=True)\n\n template_data = dict(\n reward_point_actions=reward_point_actions,\n total_points_available=total_points_available,\n events=events,\n point_selection=[x for x in range(0,total_points_available+1)])\n return render(request, \"rewards_index.html\", template_data)\n\n\n@staff_required\ndef semester_reward_points(request, semester_id):\n semester = get_object_or_404(Semester, id=semester_id)\n courses = Course.objects.filter(semester=semester)\n participants = set()\n for course in courses:\n for participant in course.participants.all():\n if can_user_use_reward_points(participant):\n participants.add(participant)\n participants = sorted(participants, key=attrgetter('last_name', 'first_name'))\n\n data = []\n for participant in participants:\n number_of_required_courses = Course.objects.filter(semester=semester, participants=participant, is_required_for_reward=True).count()\n number_of_required_courses_voted_for = Course.objects.filter(semester=semester, voters=participant, is_required_for_reward=True).count()\n number_of_optional_courses = Course.objects.filter(semester=semester, participants=participant, is_required_for_reward=False).count()\n 
number_of_optional_courses_voted_for = Course.objects.filter(semester=semester, voters=participant, is_required_for_reward=False).count()\n earned_reward_points = RewardPointGranting.objects.filter(semester=semester, user_profile=participant).exists()\n data.append((participant, number_of_required_courses_voted_for, number_of_required_courses,\n number_of_optional_courses_voted_for, number_of_optional_courses, earned_reward_points))\n\n template_data = dict(semester=semester, data=data, disable_breadcrumb_semester=False)\n return render(request, \"rewards_semester_reward_points_view.html\", template_data)\n\n\n@staff_required\ndef reward_point_redemption_events(request):\n upcoming_events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__gte=datetime.now()).order_by('date')\n past_events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__lt=datetime.now()).order_by('-date')\n template_data = dict(upcoming_events=upcoming_events, past_events=past_events)\n return render(request, \"rewards_reward_point_redemption_events.html\", template_data)\n\n\n@staff_required\ndef reward_point_redemption_event_create(request):\n event = RewardPointRedemptionEvent()\n form = RewardPointRedemptionEventForm(request.POST or None, instance=event)\n\n if form.is_valid():\n form.save()\n messages.success(request, _(\"Successfully created event.\"))\n return redirect('rewards:reward_point_redemption_events')\n else:\n return render(request, \"rewards_reward_point_redemption_event_form.html\", dict(form=form))\n\n\n@staff_required\ndef reward_point_redemption_event_edit(request, event_id):\n event = get_object_or_404(RewardPointRedemptionEvent, id=event_id)\n form = RewardPointRedemptionEventForm(request.POST or None, instance=event)\n\n if form.is_valid():\n event = form.save()\n\n messages.success(request, _(\"Successfully updated event.\"))\n return redirect('rewards:reward_point_redemption_events')\n else:\n return render(request, \"rewards_reward_point_redemption_event_form.html\", dict(event=event, form=form))\n\n\n@staff_required\ndef reward_point_redemption_event_delete(request, event_id):\n event = get_object_or_404(RewardPointRedemptionEvent, id=event_id)\n\n if event.can_delete:\n if request.method == 'POST':\n event.delete()\n return redirect('rewards:reward_point_redemption_events')\n else:\n return render(request, \"rewards_reward_point_redemption_event_delete.html\", dict(event=event))\n else:\n messages.warning(request, _(\"This event cannot be deleted because some users already redeemed points for it.\"))\n return redirect('rewards:reward_point_redemption_events')\n\n\n@staff_required\ndef reward_point_redemption_event_export(request, event_id):\n event = get_object_or_404(RewardPointRedemptionEvent, id=event_id)\n\n filename = _(\"RewardPoints\")+\"-%s-%s-%s.xls\" % (event.date, event.name, get_language())\n\n response = HttpResponse(content_type=\"application/vnd.ms-excel\")\n response[\"Content-Disposition\"] = \"attachment; filename=\\\"%s\\\"\" % filename\n\n ExcelExporter(event.redemptions_by_user()).export(response)\n\n return response\n\n\n@staff_required\ndef semester_activation(request, semester_id, active):\n if active == 'on':\n active = True\n else:\n active = False\n\n try:\n activation = SemesterActivation.objects.filter(semester=Semester.objects.get(id=semester_id)).get()\n activation.is_active = active\n except SemesterActivation.DoesNotExist:\n activation = SemesterActivation(semester=Semester.objects.get(id=semester_id), is_active=active)\n 
activation.save()\n\n return semester_view(request=request, semester_id=semester_id)\n", "path": "evap/rewards/views.py"}, {"content": "from django.utils.translation import ugettext_lazy as _\nfrom django.db import models\n\nfrom collections import OrderedDict\n\n\nclass RewardPointRedemptionEvent(models.Model):\n name = models.CharField(max_length=1024, verbose_name=_(\"event name\"))\n date = models.DateField(verbose_name=_(\"event date\"))\n redeem_end_date = models.DateField(verbose_name=_(\"redemption end date\"))\n\n @property\n def can_delete(self):\n if RewardPointRedemption.objects.filter(event=self).exists():\n return False\n return True\n\n def redemptions_by_user(self):\n redemptions = self.reward_point_redemptions.order_by('user_profile')\n redemptions_dict = OrderedDict()\n for redemption in redemptions:\n if not redemption.user_profile in redemptions_dict:\n redemptions_dict[redemption.user_profile] = 0\n redemptions_dict[redemption.user_profile] += redemption.value\n return redemptions_dict\n\nclass RewardPointGranting(models.Model):\n user_profile = models.ForeignKey('evaluation.UserProfile', related_name=\"reward_point_grantings\")\n semester = models.ForeignKey('evaluation.Semester', related_name=\"reward_point_grantings\", blank=True, null=True)\n granting_time = models.DateTimeField(verbose_name=_(\"granting time\"), auto_now_add=True)\n value = models.IntegerField(verbose_name=_(\"value\"), default=0)\n\nclass RewardPointRedemption(models.Model):\n user_profile = models.ForeignKey('evaluation.UserProfile', related_name=\"reward_point_redemptions\")\n redemption_time = models.DateTimeField(verbose_name=_(\"redemption time\"), auto_now_add=True)\n value = models.IntegerField(verbose_name=_(\"value\"), default=0)\n event = models.ForeignKey(RewardPointRedemptionEvent, related_name=\"reward_point_redemptions\")\n\nclass SemesterActivation(models.Model):\n semester = models.OneToOneField('evaluation.Semester', related_name='rewards_active')\n is_active = models.BooleanField(default=False)\n", "path": "evap/rewards/models.py"}]}
| 3,768 | 804 |
gh_patches_debug_18075
|
rasdani/github-patches
|
git_diff
|
meltano__meltano-8169
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
feature: Set the meltano.yml path in a directory different from the project
### Feature scope
Configuration (settings parsing, validation, etc.)
### Description
I tried to build a Docker container with Meltano to run in AWS Lambda, but I ran into problems because I needed to change meltano.yml at run time, and AWS Lambda only lets you write files under /tmp.
I believe it would be a great feature if it were possible to set the meltano.yml path via an environment variable. I tried to change the Meltano source code myself, but I had problems with the plugin installations; these changes are beyond my skills.
So I propose this new feature, which other projects already have (dbt, for example): make it possible to set a different configuration file, or a path to the configuration file (see the sketch after this issue).
</issue>
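At its core the request is to resolve the location of meltano.yml from the environment instead of assuming it lives next to the project. A minimal sketch of that lookup is below; the environment variable name `MELTANO_CONFIG_DIR` is purely illustrative and is not an existing Meltano setting:

```python
import os
from pathlib import Path


def resolve_config_path(default_dir: str = ".") -> Path:
    """Return the meltano.yml path, honouring an optional environment override."""
    override = os.environ.get("MELTANO_CONFIG_DIR")  # hypothetical variable name
    base_dir = Path(override) if override else Path(default_dir)
    return base_dir / "meltano.yml"


if __name__ == "__main__":
    # In AWS Lambda only /tmp is writable, so an override would point there.
    os.environ["MELTANO_CONFIG_DIR"] = "/tmp/meltano"
    print(resolve_config_path())  # /tmp/meltano/meltano.yml
```

Reading the override lazily at call time, rather than once at import time, keeps the path changeable between invocations, which matters in a Lambda-style environment.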
<code>
[start of src/meltano/cli/add.py]
1 """Plugin Add CLI."""
2
3 from __future__ import annotations
4
5 import typing as t
6 from pathlib import Path
7 from urllib.parse import urlparse
8
9 import click
10 import requests
11
12 from meltano.cli.params import pass_project
13 from meltano.cli.utils import (
14 CliError,
15 PartialInstrumentedCmd,
16 add_plugin,
17 add_required_plugins,
18 check_dependencies_met,
19 install_plugins,
20 )
21 from meltano.core.plugin import PluginRef, PluginType
22 from meltano.core.plugin_install_service import PluginInstallReason
23 from meltano.core.project_add_service import ProjectAddService
24 from meltano.core.tracking.contexts import CliEvent, PluginsTrackingContext
25 from meltano.core.yaml import yaml
26
27 if t.TYPE_CHECKING:
28 from meltano.core.plugin.project_plugin import ProjectPlugin
29 from meltano.core.project import Project
30 from meltano.core.tracking import Tracker
31
32
33 def _load_yaml_from_ref(_ctx, _param, value: str | None) -> dict:
34 if not value:
35 return
36
37 try:
38 url = urlparse(value)
39 if url.scheme and url.netloc:
40 response = requests.get(value, timeout=10)
41 response.raise_for_status()
42 content = response.text
43 else:
44 content = Path(value).read_text()
45
46 except (ValueError, FileNotFoundError, IsADirectoryError) as e:
47 raise click.BadParameter(e) from e
48
49 return yaml.load(content) or {}
50
51
52 @click.command( # noqa: WPS238
53 cls=PartialInstrumentedCmd,
54 short_help="Add a plugin to your project.",
55 )
56 @click.argument("plugin_type", type=click.Choice(PluginType.cli_arguments()))
57 @click.argument("plugin_name", nargs=-1, required=True)
58 @click.option(
59 "--inherit-from",
60 help=(
61 "Add a plugin inheriting from an existing plugin in the project"
62 " or a discoverable plugin identified, by name."
63 ),
64 )
65 @click.option(
66 "--variant",
67 help="Add a specific (non-default) variant of the identified discoverable plugin.",
68 )
69 @click.option(
70 "--as",
71 "as_name",
72 help=(
73 "Shorthand for '--inherit-from', that can be used to add a "
74 "discoverable plugin to your project with a different name. "
75 "Usage:\b\n\nadd <type> <inherit-from> --as <name>"
76 ),
77 )
78 @click.option(
79 "--from-ref",
80 "plugin_yaml",
81 callback=_load_yaml_from_ref,
82 help="Reference a plugin defintion to add from.",
83 )
84 @click.option(
85 "--python",
86 help=(
87 "The Python version to use for the plugin. Only applies to base plugins which "
88 "have Python virtual environments, rather than inherited plugins which use the "
89 "virtual environment of their base plugin, or plugins that run in a container."
90 ),
91 )
92 @click.option(
93 "--custom",
94 is_flag=True,
95 help=(
96 "Add a custom plugin. The command will prompt you for the package's "
97 "base plugin description metadata."
98 ),
99 )
100 @click.option(
101 "--no-install",
102 is_flag=True,
103 help="Do not install the plugin after adding it to the project.",
104 )
105 @pass_project()
106 @click.pass_context
107 def add( # noqa: WPS238
108 ctx,
109 project: Project,
110 plugin_type: str,
111 plugin_name: str,
112 inherit_from: str | None = None,
113 variant: str | None = None,
114 as_name: str | None = None,
115 plugin_yaml: dict | None = None,
116 python: str | None = None,
117 **flags,
118 ):
119 """
120 Add a plugin to your project.
121
122 \b\nRead more at https://docs.meltano.com/reference/command-line-interface#add
123 """
124 tracker: Tracker = ctx.obj["tracker"]
125
126 plugin_type = PluginType.from_cli_argument(plugin_type)
127 plugin_names = plugin_name # nargs=-1
128
129 if as_name:
130 # `add <type> <inherit-from> --as <name>``
131 # is equivalent to:
132 # `add <type> <name> --inherit-from <inherit-from>``
133 inherit_from = plugin_names[0]
134 plugin_names = [as_name]
135
136 if flags["custom"] and plugin_type in {
137 PluginType.TRANSFORMS,
138 PluginType.ORCHESTRATORS,
139 }:
140 tracker.track_command_event(CliEvent.aborted)
141 raise CliError(f"--custom is not supported for {plugin_type}")
142
143 plugin_refs = [
144 PluginRef(plugin_type=plugin_type, name=name) for name in plugin_names
145 ]
146 dependencies_met, err = check_dependencies_met(
147 plugin_refs=plugin_refs,
148 plugins_service=project.plugins,
149 )
150 if not dependencies_met:
151 tracker.track_command_event(CliEvent.aborted)
152 raise CliError(f"Failed to install plugin(s): {err}")
153
154 add_service = ProjectAddService(project)
155
156 plugins: list[ProjectPlugin] = []
157 for plugin in plugin_names:
158 try:
159 plugins.append(
160 add_plugin(
161 plugin_type,
162 plugin,
163 python=python,
164 inherit_from=inherit_from,
165 variant=variant,
166 custom=flags["custom"],
167 add_service=add_service,
168 plugin_yaml=plugin_yaml,
169 ),
170 )
171 except Exception:
172 # if the plugin is not known to meltano send what information we do have
173 tracker.add_contexts(
174 PluginsTrackingContext([(plugin, None) for plugin in plugins]),
175 )
176 tracker.track_command_event(CliEvent.aborted)
177 raise
178
179 required_plugins = add_required_plugins(
180 plugins,
181 add_service=add_service,
182 )
183 plugins.extend(required_plugins)
184 tracker.add_contexts(
185 PluginsTrackingContext([(candidate, None) for candidate in plugins]),
186 )
187 tracker.track_command_event(CliEvent.inflight)
188
189 if not flags.get("no_install"):
190 success = install_plugins(project, plugins, reason=PluginInstallReason.ADD)
191
192 if not success:
193 tracker.track_command_event(CliEvent.failed)
194 raise CliError("Failed to install plugin(s)")
195
196 _print_plugins(plugins)
197 tracker.track_command_event(CliEvent.completed)
198
199
200 def _print_plugins(plugins):
201 printed_empty_line = False
202 for plugin in plugins:
203 docs_url = plugin.docs or plugin.repo
204 if not docs_url:
205 continue
206
207 if not printed_empty_line:
208 click.echo()
209 printed_empty_line = True
210
211 click.echo(
212 f"To learn more about {plugin.type.descriptor} '{plugin.name}', "
213 f"visit {docs_url}",
214 )
215
[end of src/meltano/cli/add.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/meltano/cli/add.py b/src/meltano/cli/add.py
--- a/src/meltano/cli/add.py
+++ b/src/meltano/cli/add.py
@@ -102,6 +102,11 @@
is_flag=True,
help="Do not install the plugin after adding it to the project.",
)
[email protected](
+ "--force-install",
+ is_flag=True,
+ help="Ignore the required Python version declared by the plugins.",
+)
@pass_project()
@click.pass_context
def add( # noqa: WPS238
@@ -187,7 +192,12 @@
tracker.track_command_event(CliEvent.inflight)
if not flags.get("no_install"):
- success = install_plugins(project, plugins, reason=PluginInstallReason.ADD)
+ success = install_plugins(
+ project,
+ plugins,
+ reason=PluginInstallReason.ADD,
+ force=flags.get("force_install", False),
+ )
if not success:
tracker.track_command_event(CliEvent.failed)
|
{"golden_diff": "diff --git a/src/meltano/cli/add.py b/src/meltano/cli/add.py\n--- a/src/meltano/cli/add.py\n+++ b/src/meltano/cli/add.py\n@@ -102,6 +102,11 @@\n is_flag=True,\n help=\"Do not install the plugin after adding it to the project.\",\n )\[email protected](\n+ \"--force-install\",\n+ is_flag=True,\n+ help=\"Ignore the required Python version declared by the plugins.\",\n+)\n @pass_project()\n @click.pass_context\n def add( # noqa: WPS238\n@@ -187,7 +192,12 @@\n tracker.track_command_event(CliEvent.inflight)\n \n if not flags.get(\"no_install\"):\n- success = install_plugins(project, plugins, reason=PluginInstallReason.ADD)\n+ success = install_plugins(\n+ project,\n+ plugins,\n+ reason=PluginInstallReason.ADD,\n+ force=flags.get(\"force_install\", False),\n+ )\n \n if not success:\n tracker.track_command_event(CliEvent.failed)\n", "issue": "feature: Set the meltano.yml path in a directory different from the project \n### Feature scope\n\nConfiguration (settings parsing, validation, etc.)\n\n### Description\n\nI really tried to make a docker container with meltano to run in AWS Lambda, but I had a lot of problems because in my case I needed to change meltano.yml in run time, but only possible to change files in AWS Lambda if this file is in /tmp.\r\n\r\nI believe could be a great feature if it is possible to set meltano.yml path by a environment variable. I tried to change this meltano source code but I had problems with the plugins installations. These changes are beyond my skills. \r\n\r\nSo I propose this new feature like other projects have (DBT as an example), where it is possible set a new configuration file or set a path for the configuration file.\n", "before_files": [{"content": "\"\"\"Plugin Add CLI.\"\"\"\n\nfrom __future__ import annotations\n\nimport typing as t\nfrom pathlib import Path\nfrom urllib.parse import urlparse\n\nimport click\nimport requests\n\nfrom meltano.cli.params import pass_project\nfrom meltano.cli.utils import (\n CliError,\n PartialInstrumentedCmd,\n add_plugin,\n add_required_plugins,\n check_dependencies_met,\n install_plugins,\n)\nfrom meltano.core.plugin import PluginRef, PluginType\nfrom meltano.core.plugin_install_service import PluginInstallReason\nfrom meltano.core.project_add_service import ProjectAddService\nfrom meltano.core.tracking.contexts import CliEvent, PluginsTrackingContext\nfrom meltano.core.yaml import yaml\n\nif t.TYPE_CHECKING:\n from meltano.core.plugin.project_plugin import ProjectPlugin\n from meltano.core.project import Project\n from meltano.core.tracking import Tracker\n\n\ndef _load_yaml_from_ref(_ctx, _param, value: str | None) -> dict:\n if not value:\n return\n\n try:\n url = urlparse(value)\n if url.scheme and url.netloc:\n response = requests.get(value, timeout=10)\n response.raise_for_status()\n content = response.text\n else:\n content = Path(value).read_text()\n\n except (ValueError, FileNotFoundError, IsADirectoryError) as e:\n raise click.BadParameter(e) from e\n\n return yaml.load(content) or {}\n\n\[email protected]( # noqa: WPS238\n cls=PartialInstrumentedCmd,\n short_help=\"Add a plugin to your project.\",\n)\[email protected](\"plugin_type\", type=click.Choice(PluginType.cli_arguments()))\[email protected](\"plugin_name\", nargs=-1, required=True)\[email protected](\n \"--inherit-from\",\n help=(\n \"Add a plugin inheriting from an existing plugin in the project\"\n \" or a discoverable plugin identified, by name.\"\n ),\n)\[email protected](\n \"--variant\",\n help=\"Add a specific (non-default) 
variant of the identified discoverable plugin.\",\n)\[email protected](\n \"--as\",\n \"as_name\",\n help=(\n \"Shorthand for '--inherit-from', that can be used to add a \"\n \"discoverable plugin to your project with a different name. \"\n \"Usage:\\b\\n\\nadd <type> <inherit-from> --as <name>\"\n ),\n)\[email protected](\n \"--from-ref\",\n \"plugin_yaml\",\n callback=_load_yaml_from_ref,\n help=\"Reference a plugin defintion to add from.\",\n)\[email protected](\n \"--python\",\n help=(\n \"The Python version to use for the plugin. Only applies to base plugins which \"\n \"have Python virtual environments, rather than inherited plugins which use the \"\n \"virtual environment of their base plugin, or plugins that run in a container.\"\n ),\n)\[email protected](\n \"--custom\",\n is_flag=True,\n help=(\n \"Add a custom plugin. The command will prompt you for the package's \"\n \"base plugin description metadata.\"\n ),\n)\[email protected](\n \"--no-install\",\n is_flag=True,\n help=\"Do not install the plugin after adding it to the project.\",\n)\n@pass_project()\[email protected]_context\ndef add( # noqa: WPS238\n ctx,\n project: Project,\n plugin_type: str,\n plugin_name: str,\n inherit_from: str | None = None,\n variant: str | None = None,\n as_name: str | None = None,\n plugin_yaml: dict | None = None,\n python: str | None = None,\n **flags,\n):\n \"\"\"\n Add a plugin to your project.\n\n \\b\\nRead more at https://docs.meltano.com/reference/command-line-interface#add\n \"\"\"\n tracker: Tracker = ctx.obj[\"tracker\"]\n\n plugin_type = PluginType.from_cli_argument(plugin_type)\n plugin_names = plugin_name # nargs=-1\n\n if as_name:\n # `add <type> <inherit-from> --as <name>``\n # is equivalent to:\n # `add <type> <name> --inherit-from <inherit-from>``\n inherit_from = plugin_names[0]\n plugin_names = [as_name]\n\n if flags[\"custom\"] and plugin_type in {\n PluginType.TRANSFORMS,\n PluginType.ORCHESTRATORS,\n }:\n tracker.track_command_event(CliEvent.aborted)\n raise CliError(f\"--custom is not supported for {plugin_type}\")\n\n plugin_refs = [\n PluginRef(plugin_type=plugin_type, name=name) for name in plugin_names\n ]\n dependencies_met, err = check_dependencies_met(\n plugin_refs=plugin_refs,\n plugins_service=project.plugins,\n )\n if not dependencies_met:\n tracker.track_command_event(CliEvent.aborted)\n raise CliError(f\"Failed to install plugin(s): {err}\")\n\n add_service = ProjectAddService(project)\n\n plugins: list[ProjectPlugin] = []\n for plugin in plugin_names:\n try:\n plugins.append(\n add_plugin(\n plugin_type,\n plugin,\n python=python,\n inherit_from=inherit_from,\n variant=variant,\n custom=flags[\"custom\"],\n add_service=add_service,\n plugin_yaml=plugin_yaml,\n ),\n )\n except Exception:\n # if the plugin is not known to meltano send what information we do have\n tracker.add_contexts(\n PluginsTrackingContext([(plugin, None) for plugin in plugins]),\n )\n tracker.track_command_event(CliEvent.aborted)\n raise\n\n required_plugins = add_required_plugins(\n plugins,\n add_service=add_service,\n )\n plugins.extend(required_plugins)\n tracker.add_contexts(\n PluginsTrackingContext([(candidate, None) for candidate in plugins]),\n )\n tracker.track_command_event(CliEvent.inflight)\n\n if not flags.get(\"no_install\"):\n success = install_plugins(project, plugins, reason=PluginInstallReason.ADD)\n\n if not success:\n tracker.track_command_event(CliEvent.failed)\n raise CliError(\"Failed to install plugin(s)\")\n\n _print_plugins(plugins)\n 
tracker.track_command_event(CliEvent.completed)\n\n\ndef _print_plugins(plugins):\n printed_empty_line = False\n for plugin in plugins:\n docs_url = plugin.docs or plugin.repo\n if not docs_url:\n continue\n\n if not printed_empty_line:\n click.echo()\n printed_empty_line = True\n\n click.echo(\n f\"To learn more about {plugin.type.descriptor} '{plugin.name}', \"\n f\"visit {docs_url}\",\n )\n", "path": "src/meltano/cli/add.py"}]}
| 2,666 | 239 |
gh_patches_debug_803
|
rasdani/github-patches
|
git_diff
|
googleapis__google-api-python-client-871
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: module 'googleapiclient' has no attribute '__version__'
When importing the new version of google-api-python-client with
`from apiclient import discovery`
I'm getting the error
`AttributeError: module 'googleapiclient' has no attribute '__version__'`
https://github.com/googleapis/google-api-python-client/blob/84d45619d753cb04d957651886231034194058b6/apiclient/__init__.py#L22
I guess this happens because you removed the `__version__` variable from `googleapiclient/__init__.py`:
https://github.com/googleapis/google-api-python-client/commit/f706cfd821ab7457e5db37abfc3619772657dd0e#diff-b926d296d4c856bcbf877809e4523562L15
Can you please fix this? @busunkim96 @mik-laj @crwilcox
Traceback:
```
from apiclient import discovery
  File "/usr/local/lib/python3.7/site-packages/apiclient/__init__.py", line 22, in <module>
    __version__ = googleapiclient.__version__
AttributeError: module 'googleapiclient' has no attribute '__version__'
```
</issue>
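The traceback shows the compatibility shim reading `googleapiclient.__version__` unconditionally, so it breaks the moment the attribute disappears upstream. The accepted patch later in this entry simply drops that assignment; purely as an alternative illustration (not the library's actual fix), a defensive re-export could tolerate the missing attribute like this:

```python
import googleapiclient

# getattr with a default avoids the AttributeError when the attribute is gone.
__version__ = getattr(googleapiclient, "__version__", None)

if __version__ is None:
    try:
        # Fall back to installed package metadata (Python 3.8+).
        from importlib.metadata import version
        __version__ = version("google-api-python-client")
    except Exception:
        __version__ = "unknown"

print(__version__)
```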
<code>
[start of apiclient/__init__.py]
1 """Retain apiclient as an alias for googleapiclient."""
2
3 from six import iteritems
4
5 import googleapiclient
6
7 from googleapiclient import channel
8 from googleapiclient import discovery
9 from googleapiclient import errors
10 from googleapiclient import http
11 from googleapiclient import mimeparse
12 from googleapiclient import model
13
14 try:
15 from googleapiclient import sample_tools
16 except ImportError:
17 # Silently ignore, because the vast majority of consumers won't use it and
18 # it has deep dependence on oauth2client, an optional dependency.
19 sample_tools = None
20 from googleapiclient import schema
21
22 __version__ = googleapiclient.__version__
23
24 _SUBMODULES = {
25 "channel": channel,
26 "discovery": discovery,
27 "errors": errors,
28 "http": http,
29 "mimeparse": mimeparse,
30 "model": model,
31 "sample_tools": sample_tools,
32 "schema": schema,
33 }
34
35 import sys
36
37 for module_name, module in iteritems(_SUBMODULES):
38 sys.modules["apiclient.%s" % module_name] = module
39
[end of apiclient/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/apiclient/__init__.py b/apiclient/__init__.py
--- a/apiclient/__init__.py
+++ b/apiclient/__init__.py
@@ -19,8 +19,6 @@
sample_tools = None
from googleapiclient import schema
-__version__ = googleapiclient.__version__
-
_SUBMODULES = {
"channel": channel,
"discovery": discovery,
|
{"golden_diff": "diff --git a/apiclient/__init__.py b/apiclient/__init__.py\n--- a/apiclient/__init__.py\n+++ b/apiclient/__init__.py\n@@ -19,8 +19,6 @@\n sample_tools = None\n from googleapiclient import schema\n \n-__version__ = googleapiclient.__version__\n-\n _SUBMODULES = {\n \"channel\": channel,\n \"discovery\": discovery,\n", "issue": "AttributeError: module 'googleapiclient' has no attribute '__version__'\nWhen importing new version of google-api-python-client\r\n`from apiclient import discovery`\r\ni'm getting the error\r\n`AttributeError: module 'googleapiclient' has no attribute '__version__'`\r\n\r\nhttps://github.com/googleapis/google-api-python-client/blob/84d45619d753cb04d957651886231034194058b6/apiclient/__init__.py#L22\r\n\r\ni guess this happens since you have removed `__version__` var from `googleapiclient/__init__.py`\r\n\r\nhttps://github.com/googleapis/google-api-python-client/commit/f706cfd821ab7457e5db37abfc3619772657dd0e#diff-b926d296d4c856bcbf877809e4523562L15\r\n\r\ncan you please fix? @busunkim96 @mik-laj @crwilcox \r\n\r\nTraceback:\r\n``` from apiclient import discovery\r\n File \"/usr/local/lib/python3.7/site-packages/apiclient/__init__.py\", line 22, in <module>\r\n __version__ = googleapiclient.__version__\r\nAttributeError: module 'googleapiclient' has no attribute '__version__'\n", "before_files": [{"content": "\"\"\"Retain apiclient as an alias for googleapiclient.\"\"\"\n\nfrom six import iteritems\n\nimport googleapiclient\n\nfrom googleapiclient import channel\nfrom googleapiclient import discovery\nfrom googleapiclient import errors\nfrom googleapiclient import http\nfrom googleapiclient import mimeparse\nfrom googleapiclient import model\n\ntry:\n from googleapiclient import sample_tools\nexcept ImportError:\n # Silently ignore, because the vast majority of consumers won't use it and\n # it has deep dependence on oauth2client, an optional dependency.\n sample_tools = None\nfrom googleapiclient import schema\n\n__version__ = googleapiclient.__version__\n\n_SUBMODULES = {\n \"channel\": channel,\n \"discovery\": discovery,\n \"errors\": errors,\n \"http\": http,\n \"mimeparse\": mimeparse,\n \"model\": model,\n \"sample_tools\": sample_tools,\n \"schema\": schema,\n}\n\nimport sys\n\nfor module_name, module in iteritems(_SUBMODULES):\n sys.modules[\"apiclient.%s\" % module_name] = module\n", "path": "apiclient/__init__.py"}]}
| 1,182 | 98 |
gh_patches_debug_6271
|
rasdani/github-patches
|
git_diff
|
saleor__saleor-4917
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Problem with filtering products
### What I'm trying to achieve
I want to filter products from the groceries category, but I receive results even when I should not.
### Steps to reproduce the problem
In the groceries category there are two beers whose `abv` attribute has the values 51 (5.1% alc) and 67 (6.7% alc), but when filtering with the `abv` value set to 52 we receive `Carrot Juice`.
Correct query:
```
query{
products(first: 100, attributes: ["abv:51"]){
edges {
node {
id
name
}
}
}
}
```
and correct result:
```
{
"data": {
"products": {
"edges": [
{
"node": {
"id": "UHJvZHVjdDo4Mw==",
"name": "Seaman Lager"
}
}
]
}
}
}
```
Query with an invalid value (abv set to 52):
```
query{
products(first: 100, attributes: ["abv:52"]){
edges {
node {
id
name
}
}
}
}
```
and the invalid result:
```
{
"data": {
"products": {
"edges": [
{
"node": {
"id": "UHJvZHVjdDo3Mw==",
"name": "Carrot Juice"
}
}
]
}
}
}
```
### What I expected to happen
We should receive an empty product list when passing the abv value 52 (see the sketch after this issue for how the unknown value slips through).
</issue>
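A plausible culprit is visible in the filter code listed below: `_clean_product_attributes_filter_input` maps each value slug to a primary key with `values_map[attr_name].get(val_slug, val_slug)`, so an unknown slug such as "52" falls back to the raw user input and is passed on as if it were a value pk, where it can collide with an unrelated attribute value. The sketch below is an illustration of a stricter mapping only, not the project's actual fix; it treats an unknown value as "matches nothing" instead:

```python
from collections import defaultdict


def clean_attribute_filters(filter_value, values_map):
    """Map (attribute slug, value slug) pairs to known value pks.

    Returns None when any requested value is unknown, signalling that the
    filter cannot match any product at all.
    """
    queries = defaultdict(list)
    for attr_slug, value_slug in filter_value:
        value_pk = values_map.get(attr_slug, {}).get(value_slug)
        if value_pk is None:
            return None  # unknown value slug -> empty result set
        queries[attr_slug].append(value_pk)
    return dict(queries)


if __name__ == "__main__":
    values_map = {"abv": {"51": 10, "67": 11}}
    print(clean_attribute_filters([("abv", "51")], values_map))  # {'abv': [10]}
    print(clean_attribute_filters([("abv", "52")], values_map))  # None -> return no products
```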
<code>
[start of saleor/graphql/product/filters.py]
1 from collections import defaultdict
2
3 import django_filters
4 from django.db.models import Q, Sum
5 from graphene_django.filter import GlobalIDFilter, GlobalIDMultipleChoiceFilter
6
7 from ...product.filters import (
8 T_PRODUCT_FILTER_QUERIES,
9 filter_products_by_attributes_values,
10 )
11 from ...product.models import Attribute, Category, Collection, Product, ProductType
12 from ...search.backends import picker
13 from ..core.filters import EnumFilter, ListObjectTypeFilter, ObjectTypeFilter
14 from ..core.types import FilterInputObjectType
15 from ..core.types.common import PriceRangeInput
16 from ..core.utils import from_global_id_strict_type
17 from ..utils import filter_by_query_param, get_nodes
18 from . import types
19 from .enums import (
20 CollectionPublished,
21 ProductTypeConfigurable,
22 ProductTypeEnum,
23 StockAvailability,
24 )
25 from .types.attributes import AttributeInput
26
27
28 def filter_fields_containing_value(*search_fields: str):
29 """Create a icontains filters through given fields on a given query set object."""
30
31 def _filter_qs(qs, _, value):
32 if value:
33 qs = filter_by_query_param(qs, value, search_fields)
34 return qs
35
36 return _filter_qs
37
38
39 def _clean_product_attributes_filter_input(filter_value) -> T_PRODUCT_FILTER_QUERIES:
40 attributes = Attribute.objects.prefetch_related("values")
41 attributes_map = {attribute.slug: attribute.pk for attribute in attributes}
42 values_map = {
43 attr.slug: {value.slug: value.pk for value in attr.values.all()}
44 for attr in attributes
45 }
46 queries = defaultdict(list)
47
48 # Convert attribute:value pairs into a dictionary where
49 # attributes are keys and values are grouped in lists
50 for attr_name, val_slug in filter_value:
51 if attr_name not in attributes_map:
52 raise ValueError("Unknown attribute name: %r" % (attr_name,))
53 attr_pk = attributes_map[attr_name]
54 attr_val_pk = values_map[attr_name].get(val_slug, val_slug)
55 queries[attr_pk].append(attr_val_pk)
56
57 return queries
58
59
60 def filter_products_by_attributes(qs, filter_value):
61 queries = _clean_product_attributes_filter_input(filter_value)
62 return filter_products_by_attributes_values(qs, queries)
63
64
65 def filter_products_by_price(qs, price_lte=None, price_gte=None):
66 if price_lte:
67 qs = qs.filter(price_amount__lte=price_lte)
68 if price_gte:
69 qs = qs.filter(price_amount__gte=price_gte)
70 return qs
71
72
73 def filter_products_by_minimal_price(
74 qs, minimal_price_lte=None, minimal_price_gte=None
75 ):
76 if minimal_price_lte:
77 qs = qs.filter(minimal_variant_price_amount__lte=minimal_price_lte)
78 if minimal_price_gte:
79 qs = qs.filter(minimal_variant_price_amount__gte=minimal_price_gte)
80 return qs
81
82
83 def filter_products_by_categories(qs, categories):
84 categories = [
85 category.get_descendants(include_self=True) for category in categories
86 ]
87 ids = {category.id for tree in categories for category in tree}
88 return qs.filter(category__in=ids)
89
90
91 def filter_products_by_collections(qs, collections):
92 return qs.filter(collections__in=collections)
93
94
95 def sort_qs(qs, sort_by):
96 if sort_by:
97 qs = qs.order_by(sort_by["direction"] + sort_by["field"])
98 return qs
99
100
101 def filter_products_by_stock_availability(qs, stock_availability):
102 qs = qs.annotate(total_quantity=Sum("variants__quantity"))
103 if stock_availability == StockAvailability.IN_STOCK:
104 qs = qs.filter(total_quantity__gt=0)
105 elif stock_availability == StockAvailability.OUT_OF_STOCK:
106 qs = qs.filter(total_quantity=0)
107 return qs
108
109
110 def filter_attributes(qs, _, value):
111 if value:
112 value = [(v["slug"], v["value"]) for v in value]
113 qs = filter_products_by_attributes(qs, value)
114 return qs
115
116
117 def filter_categories(qs, _, value):
118 if value:
119 categories = get_nodes(value, types.Category)
120 qs = filter_products_by_categories(qs, categories)
121 return qs
122
123
124 def filter_collections(qs, _, value):
125 if value:
126 collections = get_nodes(value, types.Collection)
127 qs = filter_products_by_collections(qs, collections)
128 return qs
129
130
131 def filter_price(qs, _, value):
132 qs = filter_products_by_price(
133 qs, price_lte=value.get("lte"), price_gte=value.get("gte")
134 )
135 return qs
136
137
138 def filter_minimal_price(qs, _, value):
139 qs = filter_products_by_minimal_price(
140 qs, minimal_price_lte=value.get("lte"), minimal_price_gte=value.get("gte")
141 )
142 return qs
143
144
145 def filter_stock_availability(qs, _, value):
146 if value:
147 qs = filter_products_by_stock_availability(qs, value)
148 return qs
149
150
151 def filter_search(qs, _, value):
152 if value:
153 search = picker.pick_backend()
154 qs &= search(value).distinct()
155 return qs
156
157
158 def filter_collection_publish(qs, _, value):
159 if value == CollectionPublished.PUBLISHED:
160 qs = qs.filter(is_published=True)
161 elif value == CollectionPublished.HIDDEN:
162 qs = qs.filter(is_published=False)
163 return qs
164
165
166 def filter_product_type_configurable(qs, _, value):
167 if value == ProductTypeConfigurable.CONFIGURABLE:
168 qs = qs.filter(has_variants=True)
169 elif value == ProductTypeConfigurable.SIMPLE:
170 qs = qs.filter(has_variants=False)
171 return qs
172
173
174 def filter_product_type(qs, _, value):
175 if value == ProductTypeEnum.DIGITAL:
176 qs = qs.filter(is_digital=True)
177 elif value == ProductTypeEnum.SHIPPABLE:
178 qs = qs.filter(is_shipping_required=True)
179 return qs
180
181
182 def filter_attributes_by_product_types(qs, field, value):
183 if not value:
184 return qs
185
186 if field == "in_category":
187 category_id = from_global_id_strict_type(
188 value, only_type="Category", field=field
189 )
190 category = Category.objects.filter(pk=category_id).first()
191
192 if category is None:
193 return qs.none()
194
195 tree = category.get_descendants(include_self=True)
196 product_qs = Product.objects.filter(category__in=tree)
197
198 elif field == "in_collection":
199 collection_id = from_global_id_strict_type(
200 value, only_type="Collection", field=field
201 )
202 product_qs = Product.objects.filter(collections__id=collection_id)
203
204 else:
205 raise NotImplementedError(f"Filtering by {field} is unsupported")
206
207 product_types = set(product_qs.values_list("product_type_id", flat=True))
208 return qs.filter(
209 Q(product_types__in=product_types) | Q(product_variant_types__in=product_types)
210 )
211
212
213 class ProductFilter(django_filters.FilterSet):
214 is_published = django_filters.BooleanFilter()
215 collections = GlobalIDMultipleChoiceFilter(method=filter_collections)
216 categories = GlobalIDMultipleChoiceFilter(method=filter_categories)
217 price = ObjectTypeFilter(
218 input_class=PriceRangeInput, method=filter_price, field_name="price_amount"
219 )
220 minimal_price = ObjectTypeFilter(
221 input_class=PriceRangeInput,
222 method=filter_minimal_price,
223 field_name="minimal_price_amount",
224 )
225 attributes = ListObjectTypeFilter(
226 input_class=AttributeInput, method=filter_attributes
227 )
228 stock_availability = EnumFilter(
229 input_class=StockAvailability, method=filter_stock_availability
230 )
231 product_type = GlobalIDFilter()
232 search = django_filters.CharFilter(method=filter_search)
233
234 class Meta:
235 model = Product
236 fields = [
237 "is_published",
238 "collections",
239 "categories",
240 "price",
241 "attributes",
242 "stock_availability",
243 "product_type",
244 "search",
245 ]
246
247
248 class CollectionFilter(django_filters.FilterSet):
249 published = EnumFilter(
250 input_class=CollectionPublished, method=filter_collection_publish
251 )
252 search = django_filters.CharFilter(
253 method=filter_fields_containing_value("slug", "name")
254 )
255
256 class Meta:
257 model = Collection
258 fields = ["published", "search"]
259
260
261 class CategoryFilter(django_filters.FilterSet):
262 search = django_filters.CharFilter(
263 method=filter_fields_containing_value("slug", "name", "description")
264 )
265
266 class Meta:
267 model = Category
268 fields = ["search"]
269
270
271 class ProductTypeFilter(django_filters.FilterSet):
272 search = django_filters.CharFilter(method=filter_fields_containing_value("name"))
273
274 configurable = EnumFilter(
275 input_class=ProductTypeConfigurable, method=filter_product_type_configurable
276 )
277
278 product_type = EnumFilter(input_class=ProductTypeEnum, method=filter_product_type)
279
280 class Meta:
281 model = ProductType
282 fields = ["search", "configurable", "product_type"]
283
284
285 class AttributeFilter(django_filters.FilterSet):
286 # Search by attribute name and slug
287 search = django_filters.CharFilter(
288 method=filter_fields_containing_value("slug", "name")
289 )
290 ids = GlobalIDMultipleChoiceFilter(field_name="id")
291
292 in_collection = GlobalIDFilter(method=filter_attributes_by_product_types)
293 in_category = GlobalIDFilter(method=filter_attributes_by_product_types)
294
295 class Meta:
296 model = Attribute
297 fields = [
298 "value_required",
299 "is_variant_only",
300 "visible_in_storefront",
301 "filterable_in_storefront",
302 "filterable_in_dashboard",
303 "available_in_grid",
304 ]
305
306
307 class ProductFilterInput(FilterInputObjectType):
308 class Meta:
309 filterset_class = ProductFilter
310
311
312 class CollectionFilterInput(FilterInputObjectType):
313 class Meta:
314 filterset_class = CollectionFilter
315
316
317 class CategoryFilterInput(FilterInputObjectType):
318 class Meta:
319 filterset_class = CategoryFilter
320
321
322 class ProductTypeFilterInput(FilterInputObjectType):
323 class Meta:
324 filterset_class = ProductTypeFilter
325
326
327 class AttributeFilterInput(FilterInputObjectType):
328 class Meta:
329 filterset_class = AttributeFilter
330
[end of saleor/graphql/product/filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/saleor/graphql/product/filters.py b/saleor/graphql/product/filters.py
--- a/saleor/graphql/product/filters.py
+++ b/saleor/graphql/product/filters.py
@@ -51,7 +51,7 @@
if attr_name not in attributes_map:
raise ValueError("Unknown attribute name: %r" % (attr_name,))
attr_pk = attributes_map[attr_name]
- attr_val_pk = values_map[attr_name].get(val_slug, val_slug)
+ attr_val_pk = values_map[attr_name].get(val_slug)
queries[attr_pk].append(attr_val_pk)
return queries
|
{"golden_diff": "diff --git a/saleor/graphql/product/filters.py b/saleor/graphql/product/filters.py\n--- a/saleor/graphql/product/filters.py\n+++ b/saleor/graphql/product/filters.py\n@@ -51,7 +51,7 @@\n if attr_name not in attributes_map:\n raise ValueError(\"Unknown attribute name: %r\" % (attr_name,))\n attr_pk = attributes_map[attr_name]\n- attr_val_pk = values_map[attr_name].get(val_slug, val_slug)\n+ attr_val_pk = values_map[attr_name].get(val_slug)\n queries[attr_pk].append(attr_val_pk)\n \n return queries\n", "issue": "Problem with filtering products\n### What I'm trying to achieve\r\nI want to filter out products from grocerries category but I receive results even when I should not.\r\n\r\n### Steps to reproduce the problem\r\nIn groceries category there are two beers that have `abv` attribute with values: 51(5,1% alc) and 67(6,75 alc) but when filter with `abv` value set to 52 we receive `Carrot juice` \r\n\r\nCorrect query:\r\n```\r\nquery{\t\r\n products(first: 100, attributes: [\"abv:51\"]){\r\n \tedges {\r\n node {\r\n id\r\n name\r\n }\r\n }\r\n }\r\n}\r\n```\r\nand correct result:\r\n```\r\n{\r\n \"data\": {\r\n \"products\": {\r\n \"edges\": [\r\n {\r\n \"node\": {\r\n \"id\": \"UHJvZHVjdDo4Mw==\",\r\n \"name\": \"Seaman Lager\"\r\n }\r\n }\r\n ]\r\n }\r\n }\r\n}\r\n```\r\n\r\nQuery with invalid value(abv set to 52):\r\n```\r\nquery{\t\r\n products(first: 100, attributes: [\"abv:52\"]){\r\n \tedges {\r\n node {\r\n id\r\n name\r\n }\r\n }\r\n }\r\n}\r\n```\r\ninvalid result:\r\n```\r\n{\r\n \"data\": {\r\n \"products\": {\r\n \"edges\": [\r\n {\r\n \"node\": {\r\n \"id\": \"UHJvZHVjdDo3Mw==\",\r\n \"name\": \"Carrot Juice\"\r\n }\r\n }\r\n ]\r\n }\r\n }\r\n}\r\n```\r\n\r\n### What I expected to happen\r\nWe should receive empty product list if passing abv value 52.\n", "before_files": [{"content": "from collections import defaultdict\n\nimport django_filters\nfrom django.db.models import Q, Sum\nfrom graphene_django.filter import GlobalIDFilter, GlobalIDMultipleChoiceFilter\n\nfrom ...product.filters import (\n T_PRODUCT_FILTER_QUERIES,\n filter_products_by_attributes_values,\n)\nfrom ...product.models import Attribute, Category, Collection, Product, ProductType\nfrom ...search.backends import picker\nfrom ..core.filters import EnumFilter, ListObjectTypeFilter, ObjectTypeFilter\nfrom ..core.types import FilterInputObjectType\nfrom ..core.types.common import PriceRangeInput\nfrom ..core.utils import from_global_id_strict_type\nfrom ..utils import filter_by_query_param, get_nodes\nfrom . 
import types\nfrom .enums import (\n CollectionPublished,\n ProductTypeConfigurable,\n ProductTypeEnum,\n StockAvailability,\n)\nfrom .types.attributes import AttributeInput\n\n\ndef filter_fields_containing_value(*search_fields: str):\n \"\"\"Create a icontains filters through given fields on a given query set object.\"\"\"\n\n def _filter_qs(qs, _, value):\n if value:\n qs = filter_by_query_param(qs, value, search_fields)\n return qs\n\n return _filter_qs\n\n\ndef _clean_product_attributes_filter_input(filter_value) -> T_PRODUCT_FILTER_QUERIES:\n attributes = Attribute.objects.prefetch_related(\"values\")\n attributes_map = {attribute.slug: attribute.pk for attribute in attributes}\n values_map = {\n attr.slug: {value.slug: value.pk for value in attr.values.all()}\n for attr in attributes\n }\n queries = defaultdict(list)\n\n # Convert attribute:value pairs into a dictionary where\n # attributes are keys and values are grouped in lists\n for attr_name, val_slug in filter_value:\n if attr_name not in attributes_map:\n raise ValueError(\"Unknown attribute name: %r\" % (attr_name,))\n attr_pk = attributes_map[attr_name]\n attr_val_pk = values_map[attr_name].get(val_slug, val_slug)\n queries[attr_pk].append(attr_val_pk)\n\n return queries\n\n\ndef filter_products_by_attributes(qs, filter_value):\n queries = _clean_product_attributes_filter_input(filter_value)\n return filter_products_by_attributes_values(qs, queries)\n\n\ndef filter_products_by_price(qs, price_lte=None, price_gte=None):\n if price_lte:\n qs = qs.filter(price_amount__lte=price_lte)\n if price_gte:\n qs = qs.filter(price_amount__gte=price_gte)\n return qs\n\n\ndef filter_products_by_minimal_price(\n qs, minimal_price_lte=None, minimal_price_gte=None\n):\n if minimal_price_lte:\n qs = qs.filter(minimal_variant_price_amount__lte=minimal_price_lte)\n if minimal_price_gte:\n qs = qs.filter(minimal_variant_price_amount__gte=minimal_price_gte)\n return qs\n\n\ndef filter_products_by_categories(qs, categories):\n categories = [\n category.get_descendants(include_self=True) for category in categories\n ]\n ids = {category.id for tree in categories for category in tree}\n return qs.filter(category__in=ids)\n\n\ndef filter_products_by_collections(qs, collections):\n return qs.filter(collections__in=collections)\n\n\ndef sort_qs(qs, sort_by):\n if sort_by:\n qs = qs.order_by(sort_by[\"direction\"] + sort_by[\"field\"])\n return qs\n\n\ndef filter_products_by_stock_availability(qs, stock_availability):\n qs = qs.annotate(total_quantity=Sum(\"variants__quantity\"))\n if stock_availability == StockAvailability.IN_STOCK:\n qs = qs.filter(total_quantity__gt=0)\n elif stock_availability == StockAvailability.OUT_OF_STOCK:\n qs = qs.filter(total_quantity=0)\n return qs\n\n\ndef filter_attributes(qs, _, value):\n if value:\n value = [(v[\"slug\"], v[\"value\"]) for v in value]\n qs = filter_products_by_attributes(qs, value)\n return qs\n\n\ndef filter_categories(qs, _, value):\n if value:\n categories = get_nodes(value, types.Category)\n qs = filter_products_by_categories(qs, categories)\n return qs\n\n\ndef filter_collections(qs, _, value):\n if value:\n collections = get_nodes(value, types.Collection)\n qs = filter_products_by_collections(qs, collections)\n return qs\n\n\ndef filter_price(qs, _, value):\n qs = filter_products_by_price(\n qs, price_lte=value.get(\"lte\"), price_gte=value.get(\"gte\")\n )\n return qs\n\n\ndef filter_minimal_price(qs, _, value):\n qs = filter_products_by_minimal_price(\n qs, minimal_price_lte=value.get(\"lte\"), 
minimal_price_gte=value.get(\"gte\")\n )\n return qs\n\n\ndef filter_stock_availability(qs, _, value):\n if value:\n qs = filter_products_by_stock_availability(qs, value)\n return qs\n\n\ndef filter_search(qs, _, value):\n if value:\n search = picker.pick_backend()\n qs &= search(value).distinct()\n return qs\n\n\ndef filter_collection_publish(qs, _, value):\n if value == CollectionPublished.PUBLISHED:\n qs = qs.filter(is_published=True)\n elif value == CollectionPublished.HIDDEN:\n qs = qs.filter(is_published=False)\n return qs\n\n\ndef filter_product_type_configurable(qs, _, value):\n if value == ProductTypeConfigurable.CONFIGURABLE:\n qs = qs.filter(has_variants=True)\n elif value == ProductTypeConfigurable.SIMPLE:\n qs = qs.filter(has_variants=False)\n return qs\n\n\ndef filter_product_type(qs, _, value):\n if value == ProductTypeEnum.DIGITAL:\n qs = qs.filter(is_digital=True)\n elif value == ProductTypeEnum.SHIPPABLE:\n qs = qs.filter(is_shipping_required=True)\n return qs\n\n\ndef filter_attributes_by_product_types(qs, field, value):\n if not value:\n return qs\n\n if field == \"in_category\":\n category_id = from_global_id_strict_type(\n value, only_type=\"Category\", field=field\n )\n category = Category.objects.filter(pk=category_id).first()\n\n if category is None:\n return qs.none()\n\n tree = category.get_descendants(include_self=True)\n product_qs = Product.objects.filter(category__in=tree)\n\n elif field == \"in_collection\":\n collection_id = from_global_id_strict_type(\n value, only_type=\"Collection\", field=field\n )\n product_qs = Product.objects.filter(collections__id=collection_id)\n\n else:\n raise NotImplementedError(f\"Filtering by {field} is unsupported\")\n\n product_types = set(product_qs.values_list(\"product_type_id\", flat=True))\n return qs.filter(\n Q(product_types__in=product_types) | Q(product_variant_types__in=product_types)\n )\n\n\nclass ProductFilter(django_filters.FilterSet):\n is_published = django_filters.BooleanFilter()\n collections = GlobalIDMultipleChoiceFilter(method=filter_collections)\n categories = GlobalIDMultipleChoiceFilter(method=filter_categories)\n price = ObjectTypeFilter(\n input_class=PriceRangeInput, method=filter_price, field_name=\"price_amount\"\n )\n minimal_price = ObjectTypeFilter(\n input_class=PriceRangeInput,\n method=filter_minimal_price,\n field_name=\"minimal_price_amount\",\n )\n attributes = ListObjectTypeFilter(\n input_class=AttributeInput, method=filter_attributes\n )\n stock_availability = EnumFilter(\n input_class=StockAvailability, method=filter_stock_availability\n )\n product_type = GlobalIDFilter()\n search = django_filters.CharFilter(method=filter_search)\n\n class Meta:\n model = Product\n fields = [\n \"is_published\",\n \"collections\",\n \"categories\",\n \"price\",\n \"attributes\",\n \"stock_availability\",\n \"product_type\",\n \"search\",\n ]\n\n\nclass CollectionFilter(django_filters.FilterSet):\n published = EnumFilter(\n input_class=CollectionPublished, method=filter_collection_publish\n )\n search = django_filters.CharFilter(\n method=filter_fields_containing_value(\"slug\", \"name\")\n )\n\n class Meta:\n model = Collection\n fields = [\"published\", \"search\"]\n\n\nclass CategoryFilter(django_filters.FilterSet):\n search = django_filters.CharFilter(\n method=filter_fields_containing_value(\"slug\", \"name\", \"description\")\n )\n\n class Meta:\n model = Category\n fields = [\"search\"]\n\n\nclass ProductTypeFilter(django_filters.FilterSet):\n search = 
django_filters.CharFilter(method=filter_fields_containing_value(\"name\"))\n\n configurable = EnumFilter(\n input_class=ProductTypeConfigurable, method=filter_product_type_configurable\n )\n\n product_type = EnumFilter(input_class=ProductTypeEnum, method=filter_product_type)\n\n class Meta:\n model = ProductType\n fields = [\"search\", \"configurable\", \"product_type\"]\n\n\nclass AttributeFilter(django_filters.FilterSet):\n # Search by attribute name and slug\n search = django_filters.CharFilter(\n method=filter_fields_containing_value(\"slug\", \"name\")\n )\n ids = GlobalIDMultipleChoiceFilter(field_name=\"id\")\n\n in_collection = GlobalIDFilter(method=filter_attributes_by_product_types)\n in_category = GlobalIDFilter(method=filter_attributes_by_product_types)\n\n class Meta:\n model = Attribute\n fields = [\n \"value_required\",\n \"is_variant_only\",\n \"visible_in_storefront\",\n \"filterable_in_storefront\",\n \"filterable_in_dashboard\",\n \"available_in_grid\",\n ]\n\n\nclass ProductFilterInput(FilterInputObjectType):\n class Meta:\n filterset_class = ProductFilter\n\n\nclass CollectionFilterInput(FilterInputObjectType):\n class Meta:\n filterset_class = CollectionFilter\n\n\nclass CategoryFilterInput(FilterInputObjectType):\n class Meta:\n filterset_class = CategoryFilter\n\n\nclass ProductTypeFilterInput(FilterInputObjectType):\n class Meta:\n filterset_class = ProductTypeFilter\n\n\nclass AttributeFilterInput(FilterInputObjectType):\n class Meta:\n filterset_class = AttributeFilter\n", "path": "saleor/graphql/product/filters.py"}]}
| 3,989 | 138 |
gh_patches_debug_2263
|
rasdani/github-patches
|
git_diff
|
hpcaitech__ColossalAI-5321
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
<code>
[start of colossalai/accelerator/npu_accelerator.py]
1 #!/usr/bin/env python
2
3 from typing import Any, Callable, Dict, List, Optional, Tuple, Union
4
5 import torch
6 import torch.distributed as dist
7
8 from .base_accelerator import BaseAccelerator
9
10 try:
11 import torch_npu # noqa
12 except ImportError:
13 pass
14
15
16 __all__ = ["NpuAccelerator"]
17
18
19 class NpuAccelerator(BaseAccelerator):
20 """
21 Accelerator class for Huawei NPU devices.
22 """
23
24 def __init__(self):
25 super().__init__(name="npu", communication_backend="hccl", is_synchronous=False)
26
27 # =======================
28 # device APIs
29 # =======================
30 def get_version(self) -> str:
31 """
32 Return the version of the accelerator which torch is built against.
33 """
34 return torch.version.npu
35
36 def get_current_device(self) -> torch.device:
37 """
38 Return the current device.
39 """
40 return torch.device(f"npu:{torch.npu.current_device()}")
41
42 def current_device(self) -> int:
43 """
44 Return the current device index.
45 """
46 return torch.npu.current_device()
47
48 def set_device(self, device: Optional[Union[torch.device, int]] = None) -> None:
49 """
50 Bind the current process to a device.
51 """
52 if device is None:
53 if not dist.is_initialized():
54 raise RuntimeError("Cannot get current device when distributed is not initialized.")
55 device = dist.get_rank() % self.device_count()
56 torch.npu.set_device(device)
57
58 def get_device_name(self, device: Union[torch.device, int]) -> str:
59 """
60 Return the name of the device.
61 """
62 return torch.npu.get_device_name(device)
63
64 def synchronize(self, device: Union[torch.device, int] = None):
65 """
66 Synchronize the current process.
67 """
68 torch.npu.synchronize(device)
69
70 def is_available(self):
71 """
72 Check if the accelerator is available.
73 """
74 return torch.npu.is_available()
75
76 def device_count(self):
77 """
78 Return the number of devices on the machine.
79 """
80 return torch.npu.device_count()
81
82 def get_device_capability(self, device=None) -> Tuple[int, int]:
83 """
84 Gets the npu capability of a device.
85 """
86 return torch.npu.get_device_capability(device)
87
88 def get_device_name(self, device=None) -> str:
89 """
90 Gets the name of a device.
91 """
92 return torch.npu.get_device_name(device)
93
94 def get_device_properties(self, device):
95 """
96 Gets the properties of a device.
97 """
98 return torch.npu.get_device_properties(device)
99
100 def utilization(self, device=None) -> int:
101 """
102 Returns the percent of time over the past sample period during which one or more kernels was executing on the GPU as given by nvidia-smi
103 """
104 return torch.npu.utilization(device)
105
106 # =======================
107 # random number generator APIs
108 # =======================
109 def get_rng_state(self, device="npu") -> torch.Tensor:
110 """
111 Returns the random number generator state of the specified GPU as a ByteTensor.
112 """
113 return torch.npu.get_rng_state(device)
114
115 def get_rng_state_all(self) -> List[torch.Tensor]:
116 """
117 Returns a list of ByteTensor representing the random number states of all devices.
118 """
119 return torch.npu.get_rng_state_all()
120
121 def set_rng_state(self, new_state: torch.ByteTensor, device: str = "npu") -> None:
122 """
123 Sets the random number generator state of the specified GPU.
124 """
125 torch.npu.set_rng_state(new_state, device)
126
127 def set_rng_state_all(self, new_states: List[torch.ByteTensor]) -> None:
128 """
129 Sets the random number generator state of all devices.
130 """
131 torch.npu.set_rng_state_all(new_states)
132
133 def manual_seed(self, seed: int) -> None:
134 """
135 Sets the seed for generating random numbers for the current GPU.
136 """
137 torch.npu.manual_seed(seed)
138
139 def manual_seed_all(self, seed: int) -> None:
140 """
141 Set the random seed for the all processes.
142 """
143 torch.npu.manual_seed_all(seed)
144
145 def seed(self) -> None:
146 """
147 Sets the seed for generating random numbers to a random number for the current GPU.
148 """
149 torch.npu.seed()
150
151 def seed_all(self) -> None:
152 """
153 Sets the seed for generating random numbers to a random number on all GPUs.
154 """
155 torch.npu.seed_all()
156
157 def initial_seed(self) -> int:
158 """
159 Returns the current random seed of the current GPU.
160 """
161 return torch.npu.initial_seed()
162
163 # =======================
164 # memory management APIs
165 # =======================
166
167 def empty_cache(self) -> None:
168 """
169 Releases all unoccupied cached memory currently held by the caching allocator so that those can be used in other GPU application and visible in nvidia-smi.
170 """
171 torch.npu.empty_cache()
172
173 def memory_stats(self, device=None) -> Dict[str, Any]:
174 """
175 Returns a dictionary of npu memory allocator statistics for a given device.
176 """
177 return torch.npu.memory_stats(device=device)
178
179 def memory_summary(self, device=None, abbreviated=False) -> str:
180 """
181 Returns a human-readable printout of the current memory allocator statistics for a given device.
182 """
183 return torch.npu.memory_summary(device=device, abbreviated=abbreviated)
184
185 def memory_snapshot(self):
186 """
187 Returns a snapshot of the npu memory allocator state across all devices.
188 """
189 return torch.npu.memory_snapshot()
190
191 def memory_allocated(self, device=None) -> int:
192 """
193 Returns the current GPU memory occupied by tensors in bytes for a given device.
194 """
195 return torch.npu.memory_allocated(device=device)
196
197 def max_memory_allocated(self, device=None) -> int:
198 """
199 Returns the maximum GPU memory occupied by tensors in bytes for a given device.
200 """
201 return torch.npu.max_memory_allocated(device=device)
202
203 def reset_max_memory_allocated(self, device=None) -> None:
204 """
205 Resets the starting point in tracking maximum GPU memory occupied by tensors for a given device.
206 """
207 torch.npu.reset_max_memory_allocated(device=device)
208
209 def reset_max_memory_cached(self, device=None) -> None:
210 """
211 Resets the starting point in tracking maximum GPU memory managed by the caching allocator for a given device.
212 """
213 torch.npu.reset_max_memory_cached(device=device)
214
215 def memory_reserved(self, device=None) -> int:
216 """
217 Returns the current GPU memory managed by the caching allocator in bytes for a given device.
218 """
219 return torch.npu.memory_reserved(device=device)
220
221 def max_memory_reserved(self, device=None) -> int:
222 """
223 Returns the maximum GPU memory managed by the caching allocator in bytes for a given device.
224 """
225 return torch.npu.max_memory_reserved(device=device)
226
227 def set_per_process_memory_fraction(self, fraction: float, device=None) -> None:
228 """
229 Set memory fraction for a process.
230 """
231 torch.npu.set_per_process_memory_fraction(fraction, device=device)
232
233 def reset_peak_memory_stats(self, device=None) -> None:
234 """
235 Resets the "peak" stats tracked by the npu memory allocator.
236 """
237 torch.npu.reset_peak_memory_stats(device=device)
238
239 # =======================
240 # streams and events APIs
241 # =======================
242
243 def Stream(self, device=None, priority=0, **kwargs):
244 """
245 A npu stream is a linear sequence of execution that belongs to a specific device, independent from other streams. See npu-semantics for details.
246 """
247 return torch.npu.Stream(device, priority, **kwargs)
248
249 def Event(self, enable_timing: bool = False, blocking: bool = False, interprocess: bool = False):
250 """
251 npu events are synchronization markers that can be used to monitor the device's progress, to accurately measure timing, and to synchronize npu streams.
252 """
253 return torch.npu.Event(enable_timing, blocking, interprocess)
254
255 def current_stream(self, device=None):
256 """
257 Returns the currently selected Stream for a given device.
258 """
259 return torch.npu.current_stream(device)
260
261 def default_stream(self, device=None):
262 """
263 Returns the default Stream for a given device.
264 """
265 return torch.npu.default_stream(device)
266
267 def set_stream(self, stream_):
268 """
269 Sets the current stream.This is a wrapper API to set the stream.
270 """
271 torch.npu.set_stream(stream_)
272
273 def stream(self, stream_):
274 """
275 Wrapper around the Context-manager StreamContext that selects a given stream.
276 """
277 return torch.npu.stream(stream_)
278
279 # =======================
280 # amp APIs
281 # =======================
282 def autocast(
283 self, enabled: bool = True, dtype: torch.dtype = torch.float16, cache_enabled: bool = True
284 ) -> Callable:
285 """
286 Return autocast function
287 """
288 return torch.npu.amp.autocast(enabled=enabled, dtype=dtype, cache_enabled=cache_enabled)
289
[end of colossalai/accelerator/npu_accelerator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/colossalai/accelerator/npu_accelerator.py b/colossalai/accelerator/npu_accelerator.py
--- a/colossalai/accelerator/npu_accelerator.py
+++ b/colossalai/accelerator/npu_accelerator.py
@@ -31,7 +31,7 @@
"""
Return the version of the accelerator which torch is built against.
"""
- return torch.version.npu
+ return torch.version.cann
def get_current_device(self) -> torch.device:
"""
|
{"golden_diff": "diff --git a/colossalai/accelerator/npu_accelerator.py b/colossalai/accelerator/npu_accelerator.py\n--- a/colossalai/accelerator/npu_accelerator.py\n+++ b/colossalai/accelerator/npu_accelerator.py\n@@ -31,7 +31,7 @@\n \"\"\"\n Return the version of the accelerator which torch is built against.\n \"\"\"\n- return torch.version.npu\n+ return torch.version.cann\n \n def get_current_device(self) -> torch.device:\n \"\"\"\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Union\n\nimport torch\nimport torch.distributed as dist\n\nfrom .base_accelerator import BaseAccelerator\n\ntry:\n import torch_npu # noqa\nexcept ImportError:\n pass\n\n\n__all__ = [\"NpuAccelerator\"]\n\n\nclass NpuAccelerator(BaseAccelerator):\n \"\"\"\n Accelerator class for Huawei NPU devices.\n \"\"\"\n\n def __init__(self):\n super().__init__(name=\"npu\", communication_backend=\"hccl\", is_synchronous=False)\n\n # =======================\n # device APIs\n # =======================\n def get_version(self) -> str:\n \"\"\"\n Return the version of the accelerator which torch is built against.\n \"\"\"\n return torch.version.npu\n\n def get_current_device(self) -> torch.device:\n \"\"\"\n Return the current device.\n \"\"\"\n return torch.device(f\"npu:{torch.npu.current_device()}\")\n\n def current_device(self) -> int:\n \"\"\"\n Return the current device index.\n \"\"\"\n return torch.npu.current_device()\n\n def set_device(self, device: Optional[Union[torch.device, int]] = None) -> None:\n \"\"\"\n Bind the current process to a device.\n \"\"\"\n if device is None:\n if not dist.is_initialized():\n raise RuntimeError(\"Cannot get current device when distributed is not initialized.\")\n device = dist.get_rank() % self.device_count()\n torch.npu.set_device(device)\n\n def get_device_name(self, device: Union[torch.device, int]) -> str:\n \"\"\"\n Return the name of the device.\n \"\"\"\n return torch.npu.get_device_name(device)\n\n def synchronize(self, device: Union[torch.device, int] = None):\n \"\"\"\n Synchronize the current process.\n \"\"\"\n torch.npu.synchronize(device)\n\n def is_available(self):\n \"\"\"\n Check if the accelerator is available.\n \"\"\"\n return torch.npu.is_available()\n\n def device_count(self):\n \"\"\"\n Return the number of devices on the machine.\n \"\"\"\n return torch.npu.device_count()\n\n def get_device_capability(self, device=None) -> Tuple[int, int]:\n \"\"\"\n Gets the npu capability of a device.\n \"\"\"\n return torch.npu.get_device_capability(device)\n\n def get_device_name(self, device=None) -> str:\n \"\"\"\n Gets the name of a device.\n \"\"\"\n return torch.npu.get_device_name(device)\n\n def get_device_properties(self, device):\n \"\"\"\n Gets the properties of a device.\n \"\"\"\n return torch.npu.get_device_properties(device)\n\n def utilization(self, device=None) -> int:\n \"\"\"\n Returns the percent of time over the past sample period during which one or more kernels was executing on the GPU as given by nvidia-smi\n \"\"\"\n return torch.npu.utilization(device)\n\n # =======================\n # random number generator APIs\n # =======================\n def get_rng_state(self, device=\"npu\") -> torch.Tensor:\n \"\"\"\n Returns the random number generator state of the specified GPU as a ByteTensor.\n \"\"\"\n return torch.npu.get_rng_state(device)\n\n def get_rng_state_all(self) -> 
List[torch.Tensor]:\n \"\"\"\n Returns a list of ByteTensor representing the random number states of all devices.\n \"\"\"\n return torch.npu.get_rng_state_all()\n\n def set_rng_state(self, new_state: torch.ByteTensor, device: str = \"npu\") -> None:\n \"\"\"\n Sets the random number generator state of the specified GPU.\n \"\"\"\n torch.npu.set_rng_state(new_state, device)\n\n def set_rng_state_all(self, new_states: List[torch.ByteTensor]) -> None:\n \"\"\"\n Sets the random number generator state of all devices.\n \"\"\"\n torch.npu.set_rng_state_all(new_states)\n\n def manual_seed(self, seed: int) -> None:\n \"\"\"\n Sets the seed for generating random numbers for the current GPU.\n \"\"\"\n torch.npu.manual_seed(seed)\n\n def manual_seed_all(self, seed: int) -> None:\n \"\"\"\n Set the random seed for the all processes.\n \"\"\"\n torch.npu.manual_seed_all(seed)\n\n def seed(self) -> None:\n \"\"\"\n Sets the seed for generating random numbers to a random number for the current GPU.\n \"\"\"\n torch.npu.seed()\n\n def seed_all(self) -> None:\n \"\"\"\n Sets the seed for generating random numbers to a random number on all GPUs.\n \"\"\"\n torch.npu.seed_all()\n\n def initial_seed(self) -> int:\n \"\"\"\n Returns the current random seed of the current GPU.\n \"\"\"\n return torch.npu.initial_seed()\n\n # =======================\n # memory management APIs\n # =======================\n\n def empty_cache(self) -> None:\n \"\"\"\n Releases all unoccupied cached memory currently held by the caching allocator so that those can be used in other GPU application and visible in nvidia-smi.\n \"\"\"\n torch.npu.empty_cache()\n\n def memory_stats(self, device=None) -> Dict[str, Any]:\n \"\"\"\n Returns a dictionary of npu memory allocator statistics for a given device.\n \"\"\"\n return torch.npu.memory_stats(device=device)\n\n def memory_summary(self, device=None, abbreviated=False) -> str:\n \"\"\"\n Returns a human-readable printout of the current memory allocator statistics for a given device.\n \"\"\"\n return torch.npu.memory_summary(device=device, abbreviated=abbreviated)\n\n def memory_snapshot(self):\n \"\"\"\n Returns a snapshot of the npu memory allocator state across all devices.\n \"\"\"\n return torch.npu.memory_snapshot()\n\n def memory_allocated(self, device=None) -> int:\n \"\"\"\n Returns the current GPU memory occupied by tensors in bytes for a given device.\n \"\"\"\n return torch.npu.memory_allocated(device=device)\n\n def max_memory_allocated(self, device=None) -> int:\n \"\"\"\n Returns the maximum GPU memory occupied by tensors in bytes for a given device.\n \"\"\"\n return torch.npu.max_memory_allocated(device=device)\n\n def reset_max_memory_allocated(self, device=None) -> None:\n \"\"\"\n Resets the starting point in tracking maximum GPU memory occupied by tensors for a given device.\n \"\"\"\n torch.npu.reset_max_memory_allocated(device=device)\n\n def reset_max_memory_cached(self, device=None) -> None:\n \"\"\"\n Resets the starting point in tracking maximum GPU memory managed by the caching allocator for a given device.\n \"\"\"\n torch.npu.reset_max_memory_cached(device=device)\n\n def memory_reserved(self, device=None) -> int:\n \"\"\"\n Returns the current GPU memory managed by the caching allocator in bytes for a given device.\n \"\"\"\n return torch.npu.memory_reserved(device=device)\n\n def max_memory_reserved(self, device=None) -> int:\n \"\"\"\n Returns the maximum GPU memory managed by the caching allocator in bytes for a given device.\n \"\"\"\n return 
torch.npu.max_memory_reserved(device=device)\n\n def set_per_process_memory_fraction(self, fraction: float, device=None) -> None:\n \"\"\"\n Set memory fraction for a process.\n \"\"\"\n torch.npu.set_per_process_memory_fraction(fraction, device=device)\n\n def reset_peak_memory_stats(self, device=None) -> None:\n \"\"\"\n Resets the \"peak\" stats tracked by the npu memory allocator.\n \"\"\"\n torch.npu.reset_peak_memory_stats(device=device)\n\n # =======================\n # streams and events APIs\n # =======================\n\n def Stream(self, device=None, priority=0, **kwargs):\n \"\"\"\n A npu stream is a linear sequence of execution that belongs to a specific device, independent from other streams. See npu-semantics for details.\n \"\"\"\n return torch.npu.Stream(device, priority, **kwargs)\n\n def Event(self, enable_timing: bool = False, blocking: bool = False, interprocess: bool = False):\n \"\"\"\n npu events are synchronization markers that can be used to monitor the device's progress, to accurately measure timing, and to synchronize npu streams.\n \"\"\"\n return torch.npu.Event(enable_timing, blocking, interprocess)\n\n def current_stream(self, device=None):\n \"\"\"\n Returns the currently selected Stream for a given device.\n \"\"\"\n return torch.npu.current_stream(device)\n\n def default_stream(self, device=None):\n \"\"\"\n Returns the default Stream for a given device.\n \"\"\"\n return torch.npu.default_stream(device)\n\n def set_stream(self, stream_):\n \"\"\"\n Sets the current stream.This is a wrapper API to set the stream.\n \"\"\"\n torch.npu.set_stream(stream_)\n\n def stream(self, stream_):\n \"\"\"\n Wrapper around the Context-manager StreamContext that selects a given stream.\n \"\"\"\n return torch.npu.stream(stream_)\n\n # =======================\n # amp APIs\n # =======================\n def autocast(\n self, enabled: bool = True, dtype: torch.dtype = torch.float16, cache_enabled: bool = True\n ) -> Callable:\n \"\"\"\n Return autocast function\n \"\"\"\n return torch.npu.amp.autocast(enabled=enabled, dtype=dtype, cache_enabled=cache_enabled)\n", "path": "colossalai/accelerator/npu_accelerator.py"}]}
| 3,293 | 118 |
gh_patches_debug_24929
|
rasdani/github-patches
|
git_diff
|
pytorch__TensorRT-2228
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
🐛 [Bug] Issue in `broadcastable` converter utility
## Bug Description
Encountered an `IndexError` in the `broadcastable` utility:
```python
File "~/TensorRT/py/torch_tensorrt/dynamo/conversion/converter_utils.py", line 118, in broadcastable
greater_tensor[i] != lesser_tensor[j]
IndexError: tuple index out of range
While executing %where_5 : [num_users=1] = call_function[target=torch.ops.aten.where.self](...)
```
## To Reproduce
Steps to reproduce the behavior:
Invoke the `aten.where` converter with a 0D tensor.
## Expected behavior
The utility should not error.
## Environment
- Torch-TensorRT Version (e.g. 1.0.0): 8c62fca7649ac2e8e74ab7d9d66a20a164e2af51
- PyTorch Version (e.g. 1.0): `2.1.0.dev20230803+cu121`
## Additional context
Blocking compilation of #1697 with torch compile path.
</issue>
<code>
[start of py/torch_tensorrt/dynamo/conversion/converter_utils.py]
1 import logging
2 import re
3 from typing import List, Optional
4
5 import tensorrt as trt
6 import torch
7 from torch.fx.node import Target
8 from torch_tensorrt.fx.converters.converter_utils import (
9 Frameworks,
10 unified_dtype_converter,
11 )
12 from torch_tensorrt.fx.types import TRTDataType, TRTNetwork, TRTTensor
13
14 from .._SourceIR import SourceIR
15 from .converter_registry import ConverterRegistry
16
17 _LOGGER: logging.Logger = logging.getLogger(__name__)
18
19
20 def get_node_name(node: torch.fx.Node) -> str:
21 # nn_module_stack preserves the call stack of pytorch nn.modules
22 # The call stack contains a detailed name of the module
23 # which shows exactly where the module is located in the
24 # network architecture.
25 stack_item = node.meta.get("nn_module_stack", None)
26 # The current node is the last item in the stack
27 mod_stack = stack_item.popitem() if stack_item else ""
28 node_name = str(node)
29 if mod_stack:
30 mod_name = str(mod_stack[0]).replace("___", "/")
31 # Clean up the module name
32 mod_name = re.sub("^.*__self", "", mod_name)
33 mod_name = re.sub(r"_(\d+)$", r"/\g<1>", mod_name)
34 node_name = mod_name + "/" + node_name
35 else:
36 # Try an alternative way to get the module info
37 # like the node.meta['source_fn'] attr
38 pass
39
40 _LOGGER.debug(f"Node meta name {node_name}")
41 return node_name
42
43
44 def dynamic_unsupported(node: torch.fx.Node) -> bool:
45 # Validate that none of the inputs to the node have Dynamic shapes
46 assert isinstance(
47 node, torch.fx.Node
48 ), "Inputs to validator functions must be FX Nodes"
49
50 # Check node value itself
51 if getattr(node.meta["val"], "_has_symbolic_sizes_strides", False):
52 return False
53
54 # Check node arguments individually
55 if any(
56 getattr(arg.meta["val"], "_has_symbolic_sizes_strides", False)
57 for arg in node.args
58 if isinstance(arg, torch.fx.Node)
59 ):
60 return False
61
62 # Check node keyword arguments individually
63 if any(
64 getattr(kwarg.meta["val"], "_has_symbolic_sizes_strides", False)
65 for kwarg in node.kwargs.values()
66 if isinstance(kwarg, torch.fx.Node)
67 ):
68 return False
69
70 return True
71
72
73 def cast_trt_tensor(
74 network: TRTNetwork,
75 input_val: TRTTensor,
76 dtype: TRTDataType,
77 name: str,
78 target: Target = "",
79 source_ir: Optional[SourceIR] = None,
80 ) -> TRTTensor:
81 """
82 Given a TRT Tensor, convert that Tensor to the specified dtype
83 Adds an Identity layer to the network which performs the conversion
84 Args:
85 network (TRTNetwork): A TensorRT network
86 input_val (TRTTensor): A TRT Tensor to cast to a new data type
87 dtype (TRTDataType, torch.dtype, np.dtype): The data type to cast the input Tensor to
88 name (str): Name of the calling layer
89 target (Target): Target of calling node
90 source_ir (SourceIR): SourceIR of calling converter
91 Returns:
92 A TensorRT ITensor which has been casted to the specified dtype
93 """
94 trt_dtype = unified_dtype_converter(dtype, Frameworks.TRT)
95
96 if input_val.dtype != trt_dtype:
97 source_ir = source_ir if source_ir is not None else SourceIR.UNKNOWN
98 target_str = ConverterRegistry.qualified_name_or_str(target)
99 target_name = f"{source_ir}_ops{('.' + target_str) if target_str else ''}"
100
101 identity_layer = network.add_identity(input_val)
102 identity_layer.set_output_type(0, trt_dtype)
103 identity_layer.name = f"Cast ITensor {input_val.name} from {input_val.dtype} to {trt_dtype} - [{target_name}]-[{name}]"
104 return identity_layer.get_output(0)
105 else:
106 return input_val
107
108
109 def cast_int_int_div_trt_tensor(
110 network: TRTNetwork,
111 lhs_val: TRTTensor,
112 rhs_val: TRTTensor,
113 name: str,
114 ) -> List[TRTTensor]:
115 """
116 Given two `int` data type TRT Tensor to div operation, cast the TRT Tensor to float type
117 Args:
118 network (TRTNetwork): A TensorRT network
119 lhs_val (TRTTensor): A TRT Tensor numerator
120 rhs_val (TRTTensor): A TRT Tensor numerator
121 name (str): Name of calling layer
122 Returns:
123 A list of lhs_val and rhs_val casted to the approriate datatype
124 """
125 if (lhs_val.dtype == trt.int8 or lhs_val.dtype == trt.int32) and (
126 rhs_val.dtype == trt.int8 or rhs_val.dtype == trt.int32
127 ):
128 lhs_val = cast_trt_tensor(network, lhs_val, trt.float32, name)
129 rhs_val = cast_trt_tensor(network, rhs_val, trt.float32, name)
130 return [lhs_val, rhs_val]
131
132
133 def broadcastable(
134 a: TRTTensor,
135 b: TRTTensor,
136 ) -> bool:
137 "Check if two tensors are broadcastable according to torch rules"
138 a_shape = tuple(a.shape)
139 b_shape = tuple(b.shape)
140 # check from the trailing
141 diff = len(a_shape) - len(b_shape)
142 if diff == 0:
143 return True
144 if diff > 0:
145 max = len(a_shape)
146 min = len(b_shape)
147 greater_tensor = a_shape
148 lesser_tensor = b_shape
149 elif diff < 0:
150 max = len(b_shape)
151 min = len(a_shape)
152 greater_tensor = b_shape
153 lesser_tensor = a_shape
154 j = min - 1
155 for i in range(max - 1, diff - 1, -1):
156 if not (
157 greater_tensor[i] != lesser_tensor[j]
158 and (greater_tensor[i] == 1 or lesser_tensor[i] == 1)
159 ):
160 return False
161 return True
162
[end of py/torch_tensorrt/dynamo/conversion/converter_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/py/torch_tensorrt/dynamo/conversion/converter_utils.py b/py/torch_tensorrt/dynamo/conversion/converter_utils.py
--- a/py/torch_tensorrt/dynamo/conversion/converter_utils.py
+++ b/py/torch_tensorrt/dynamo/conversion/converter_utils.py
@@ -137,25 +137,23 @@
"Check if two tensors are broadcastable according to torch rules"
a_shape = tuple(a.shape)
b_shape = tuple(b.shape)
+
# check from the trailing
diff = len(a_shape) - len(b_shape)
- if diff == 0:
+
+ # Validate tensors have same rank and shape
+ if diff == 0 and all(a_shape[i] == b_shape[i] for i in range(len(a_shape))):
return True
+
+ # Left-pad the shorter dimension with ones
if diff > 0:
- max = len(a_shape)
- min = len(b_shape)
- greater_tensor = a_shape
- lesser_tensor = b_shape
- elif diff < 0:
- max = len(b_shape)
- min = len(a_shape)
- greater_tensor = b_shape
- lesser_tensor = a_shape
- j = min - 1
- for i in range(max - 1, diff - 1, -1):
- if not (
- greater_tensor[i] != lesser_tensor[j]
- and (greater_tensor[i] == 1 or lesser_tensor[i] == 1)
- ):
+ b_shape = (1,) * abs(diff) + b_shape
+ else:
+ a_shape = (1,) * abs(diff) + a_shape
+
+ # Validate one of the following conditions for broadcastability per-dimension
+ # 1. Equal number of dimensions or 2. Dimension has shape 1
+ for i in range(len(a_shape)):
+ if not (a_shape[i] == b_shape[i] or a_shape[i] == 1 or b_shape[i] == 1):
return False
return True
|
{"golden_diff": "diff --git a/py/torch_tensorrt/dynamo/conversion/converter_utils.py b/py/torch_tensorrt/dynamo/conversion/converter_utils.py\n--- a/py/torch_tensorrt/dynamo/conversion/converter_utils.py\n+++ b/py/torch_tensorrt/dynamo/conversion/converter_utils.py\n@@ -137,25 +137,23 @@\n \"Check if two tensors are broadcastable according to torch rules\"\n a_shape = tuple(a.shape)\n b_shape = tuple(b.shape)\n+\n # check from the trailing\n diff = len(a_shape) - len(b_shape)\n- if diff == 0:\n+\n+ # Validate tensors have same rank and shape\n+ if diff == 0 and all(a_shape[i] == b_shape[i] for i in range(len(a_shape))):\n return True\n+\n+ # Left-pad the shorter dimension with ones\n if diff > 0:\n- max = len(a_shape)\n- min = len(b_shape)\n- greater_tensor = a_shape\n- lesser_tensor = b_shape\n- elif diff < 0:\n- max = len(b_shape)\n- min = len(a_shape)\n- greater_tensor = b_shape\n- lesser_tensor = a_shape\n- j = min - 1\n- for i in range(max - 1, diff - 1, -1):\n- if not (\n- greater_tensor[i] != lesser_tensor[j]\n- and (greater_tensor[i] == 1 or lesser_tensor[i] == 1)\n- ):\n+ b_shape = (1,) * abs(diff) + b_shape\n+ else:\n+ a_shape = (1,) * abs(diff) + a_shape\n+\n+ # Validate one of the following conditions for broadcastability per-dimension\n+ # 1. Equal number of dimensions or 2. Dimension has shape 1\n+ for i in range(len(a_shape)):\n+ if not (a_shape[i] == b_shape[i] or a_shape[i] == 1 or b_shape[i] == 1):\n return False\n return True\n", "issue": "\ud83d\udc1b [Bug] Issue in `broadcastable` converter utility\n## Bug Description\r\n\r\nEncountered `IndexError` in `broadcastable` utility:\r\n\r\n```python\r\nFile \"~/TensorRT/py/torch_tensorrt/dynamo/conversion/converter_utils.py\", line 118, in broadcastable\r\n greater_tensor[i] != lesser_tensor[j]\r\n\r\nIndexError: tuple index out of range\r\n\r\nWhile executing %where_5 : [num_users=1] = call_function[target=torch.ops.aten.where.self](...)\r\n```\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\nInvoke `aten.where` converter with a 0D tensor.\r\n\r\n## Expected behavior\r\n\r\nUtility should not error.\r\n\r\n## Environment\r\n\r\n - Torch-TensorRT Version (e.g. 1.0.0): 8c62fca7649ac2e8e74ab7d9d66a20a164e2af51\r\n - PyTorch Version (e.g. 
1.0): `2.1.0.dev20230803+cu121`\r\n\r\n## Additional context\r\n\r\nBlocking compilation of #1697 with torch compile path.\r\n\n", "before_files": [{"content": "import logging\nimport re\nfrom typing import List, Optional\n\nimport tensorrt as trt\nimport torch\nfrom torch.fx.node import Target\nfrom torch_tensorrt.fx.converters.converter_utils import (\n Frameworks,\n unified_dtype_converter,\n)\nfrom torch_tensorrt.fx.types import TRTDataType, TRTNetwork, TRTTensor\n\nfrom .._SourceIR import SourceIR\nfrom .converter_registry import ConverterRegistry\n\n_LOGGER: logging.Logger = logging.getLogger(__name__)\n\n\ndef get_node_name(node: torch.fx.Node) -> str:\n # nn_module_stack preserves the call stack of pytorch nn.modules\n # The call stack contains a detailed name of the module\n # which shows exactly where the module is located in the\n # network architecture.\n stack_item = node.meta.get(\"nn_module_stack\", None)\n # The current node is the last item in the stack\n mod_stack = stack_item.popitem() if stack_item else \"\"\n node_name = str(node)\n if mod_stack:\n mod_name = str(mod_stack[0]).replace(\"___\", \"/\")\n # Clean up the module name\n mod_name = re.sub(\"^.*__self\", \"\", mod_name)\n mod_name = re.sub(r\"_(\\d+)$\", r\"/\\g<1>\", mod_name)\n node_name = mod_name + \"/\" + node_name\n else:\n # Try an alternative way to get the module info\n # like the node.meta['source_fn'] attr\n pass\n\n _LOGGER.debug(f\"Node meta name {node_name}\")\n return node_name\n\n\ndef dynamic_unsupported(node: torch.fx.Node) -> bool:\n # Validate that none of the inputs to the node have Dynamic shapes\n assert isinstance(\n node, torch.fx.Node\n ), \"Inputs to validator functions must be FX Nodes\"\n\n # Check node value itself\n if getattr(node.meta[\"val\"], \"_has_symbolic_sizes_strides\", False):\n return False\n\n # Check node arguments individually\n if any(\n getattr(arg.meta[\"val\"], \"_has_symbolic_sizes_strides\", False)\n for arg in node.args\n if isinstance(arg, torch.fx.Node)\n ):\n return False\n\n # Check node keyword arguments individually\n if any(\n getattr(kwarg.meta[\"val\"], \"_has_symbolic_sizes_strides\", False)\n for kwarg in node.kwargs.values()\n if isinstance(kwarg, torch.fx.Node)\n ):\n return False\n\n return True\n\n\ndef cast_trt_tensor(\n network: TRTNetwork,\n input_val: TRTTensor,\n dtype: TRTDataType,\n name: str,\n target: Target = \"\",\n source_ir: Optional[SourceIR] = None,\n) -> TRTTensor:\n \"\"\"\n Given a TRT Tensor, convert that Tensor to the specified dtype\n Adds an Identity layer to the network which performs the conversion\n Args:\n network (TRTNetwork): A TensorRT network\n input_val (TRTTensor): A TRT Tensor to cast to a new data type\n dtype (TRTDataType, torch.dtype, np.dtype): The data type to cast the input Tensor to\n name (str): Name of the calling layer\n target (Target): Target of calling node\n source_ir (SourceIR): SourceIR of calling converter\n Returns:\n A TensorRT ITensor which has been casted to the specified dtype\n \"\"\"\n trt_dtype = unified_dtype_converter(dtype, Frameworks.TRT)\n\n if input_val.dtype != trt_dtype:\n source_ir = source_ir if source_ir is not None else SourceIR.UNKNOWN\n target_str = ConverterRegistry.qualified_name_or_str(target)\n target_name = f\"{source_ir}_ops{('.' 
+ target_str) if target_str else ''}\"\n\n identity_layer = network.add_identity(input_val)\n identity_layer.set_output_type(0, trt_dtype)\n identity_layer.name = f\"Cast ITensor {input_val.name} from {input_val.dtype} to {trt_dtype} - [{target_name}]-[{name}]\"\n return identity_layer.get_output(0)\n else:\n return input_val\n\n\ndef cast_int_int_div_trt_tensor(\n network: TRTNetwork,\n lhs_val: TRTTensor,\n rhs_val: TRTTensor,\n name: str,\n) -> List[TRTTensor]:\n \"\"\"\n Given two `int` data type TRT Tensor to div operation, cast the TRT Tensor to float type\n Args:\n network (TRTNetwork): A TensorRT network\n lhs_val (TRTTensor): A TRT Tensor numerator\n rhs_val (TRTTensor): A TRT Tensor numerator\n name (str): Name of calling layer\n Returns:\n A list of lhs_val and rhs_val casted to the approriate datatype\n \"\"\"\n if (lhs_val.dtype == trt.int8 or lhs_val.dtype == trt.int32) and (\n rhs_val.dtype == trt.int8 or rhs_val.dtype == trt.int32\n ):\n lhs_val = cast_trt_tensor(network, lhs_val, trt.float32, name)\n rhs_val = cast_trt_tensor(network, rhs_val, trt.float32, name)\n return [lhs_val, rhs_val]\n\n\ndef broadcastable(\n a: TRTTensor,\n b: TRTTensor,\n) -> bool:\n \"Check if two tensors are broadcastable according to torch rules\"\n a_shape = tuple(a.shape)\n b_shape = tuple(b.shape)\n # check from the trailing\n diff = len(a_shape) - len(b_shape)\n if diff == 0:\n return True\n if diff > 0:\n max = len(a_shape)\n min = len(b_shape)\n greater_tensor = a_shape\n lesser_tensor = b_shape\n elif diff < 0:\n max = len(b_shape)\n min = len(a_shape)\n greater_tensor = b_shape\n lesser_tensor = a_shape\n j = min - 1\n for i in range(max - 1, diff - 1, -1):\n if not (\n greater_tensor[i] != lesser_tensor[j]\n and (greater_tensor[i] == 1 or lesser_tensor[i] == 1)\n ):\n return False\n return True\n", "path": "py/torch_tensorrt/dynamo/conversion/converter_utils.py"}]}
| 2,548 | 460 |
gh_patches_debug_19691
|
rasdani/github-patches
|
git_diff
|
aws-cloudformation__cfn-lint-2967
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Invalid Aliases when using SSM dynamic references
### CloudFormation Lint Version
0.83.0
### What operating system are you using?
Mac
### Describe the bug
When using a dynamic reference to resolve the Alias domain, cfn-lint fails, indicating that it is an invalid alias. Shouldn't the code in https://github.com/aws-cloudformation/cfn-lint/blob/main/src/cfnlint/rules/resources/cloudfront/Aliases.py check whether the value matches `REGEX_DYN_REF` and ignore it if so?
A workaround would be to use `!Sub`, which is apparently already ignored (via `FUNCTIONS`). Shouldn't we also ignore values that match `REGEX_DYN_REF`?
### Expected behavior
E3013 shouldn't be reported, since there's no way to validate the dynamic-reference value from cfn-lint's perspective.
### Reproduction template
```
CloudFront:
Type: AWS::CloudFront::Distribution
Properties:
DistributionConfig:
Enabled: true
Aliases:
- "{{resolve:ssm:/env/fqdns/certifier}}"
DefaultRootObject: index.html
```
</issue>
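As an editorial aside, here is a minimal sketch of the check the reporter is asking for: skip any alias that looks like a dynamic reference before validating it as a domain name. The `DYN_REF` pattern and the standalone `alias_violations` helper are assumptions made for this sketch; cfn-lint's real `REGEX_DYN_REF` constant and rule wiring may differ.
```python
# Sketch only; not cfn-lint's actual rule implementation.
import re

# Assumed stand-in for cfnlint.helpers.REGEX_DYN_REF
DYN_REF = re.compile(r"{{resolve:[^}]+}}")
VALID_DOMAIN = re.compile(
    r"^(?:[a-z0-9\*](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$"
)


def alias_violations(alias: str) -> list:
    """Return E3013-style messages for a single CloudFront alias."""
    if DYN_REF.search(alias):
        # Resolved at deploy time, so there is nothing to validate statically.
        return []
    messages = []
    if "*" in alias.split(".")[1:]:
        messages.append(f"Invalid use of wildcards: {alias}")
    if not VALID_DOMAIN.match(alias):
        messages.append(f"Invalid alias found: {alias}")
    return messages


print(alias_violations("{{resolve:ssm:/env/fqdns/certifier}}"))  # []
print(alias_violations("foo.*.example.com"))  # ['Invalid use of wildcards: foo.*.example.com']
```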
<code>
[start of src/cfnlint/rules/resources/cloudfront/Aliases.py]
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import regex as re
6
7 from cfnlint.helpers import FUNCTIONS
8 from cfnlint.rules import CloudFormationLintRule, RuleMatch
9
10
11 class Aliases(CloudFormationLintRule):
12 """Check if CloudFront Aliases are valid domain names"""
13
14 id = "E3013"
15 shortdesc = "CloudFront Aliases"
16 description = "CloudFront aliases should contain valid domain names"
17 source_url = "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-distributionconfig.html#cfn-cloudfront-distribution-distributionconfig-aliases"
18 tags = ["properties", "cloudfront"]
19
20 def match(self, cfn):
21 """Check cloudfront Resource Parameters"""
22
23 matches = []
24
25 valid_domain = re.compile(
26 r"^(?:[a-z0-9\*](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$"
27 )
28
29 results = cfn.get_resource_properties(
30 ["AWS::CloudFront::Distribution", "DistributionConfig"]
31 )
32 for result in results:
33 aliases = result["Value"].get("Aliases")
34 if aliases:
35 for alias in aliases:
36 if isinstance(alias, str) and alias not in FUNCTIONS:
37 wildcard = alias.split(".")
38 if "*" in wildcard[1:]:
39 path = result["Path"] + ["Aliases"]
40 message = f'Invalid use of wildcards: {alias} at {"/".join(result["Path"])}'
41 matches.append(RuleMatch(path, message))
42 if not re.match(valid_domain, alias):
43 path = result["Path"] + ["Aliases"]
44 message = f'Invalid alias found: {alias} at {"/".join(result["Path"])}'
45 matches.append(RuleMatch(path, message))
46
47 return matches
48
[end of src/cfnlint/rules/resources/cloudfront/Aliases.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/cfnlint/rules/resources/cloudfront/Aliases.py b/src/cfnlint/rules/resources/cloudfront/Aliases.py
--- a/src/cfnlint/rules/resources/cloudfront/Aliases.py
+++ b/src/cfnlint/rules/resources/cloudfront/Aliases.py
@@ -4,7 +4,7 @@
"""
import regex as re
-from cfnlint.helpers import FUNCTIONS
+from cfnlint.helpers import FUNCTIONS, REGEX_DYN_REF
from cfnlint.rules import CloudFormationLintRule, RuleMatch
@@ -35,6 +35,8 @@
for alias in aliases:
if isinstance(alias, str) and alias not in FUNCTIONS:
wildcard = alias.split(".")
+ if re.match(REGEX_DYN_REF, alias):
+ continue
if "*" in wildcard[1:]:
path = result["Path"] + ["Aliases"]
message = f'Invalid use of wildcards: {alias} at {"/".join(result["Path"])}'
|
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/cloudfront/Aliases.py b/src/cfnlint/rules/resources/cloudfront/Aliases.py\n--- a/src/cfnlint/rules/resources/cloudfront/Aliases.py\n+++ b/src/cfnlint/rules/resources/cloudfront/Aliases.py\n@@ -4,7 +4,7 @@\n \"\"\"\n import regex as re\n \n-from cfnlint.helpers import FUNCTIONS\n+from cfnlint.helpers import FUNCTIONS, REGEX_DYN_REF\n from cfnlint.rules import CloudFormationLintRule, RuleMatch\n \n \n@@ -35,6 +35,8 @@\n for alias in aliases:\n if isinstance(alias, str) and alias not in FUNCTIONS:\n wildcard = alias.split(\".\")\n+ if re.match(REGEX_DYN_REF, alias):\n+ continue\n if \"*\" in wildcard[1:]:\n path = result[\"Path\"] + [\"Aliases\"]\n message = f'Invalid use of wildcards: {alias} at {\"/\".join(result[\"Path\"])}'\n", "issue": "Invalid Aliases when using SSM dynamic references\n### CloudFormation Lint Version\r\n\r\n0.83.0\r\n\r\n### What operating system are you using?\r\n\r\nMac\r\n\r\n### Describe the bug\r\n\r\nWhen using a dynamic reference to resolve the Alias domain, cfn-lint fails indicating it's an invalid alias. Shouldn't the code check if this is a `REGEX_DYN_REF` in https://github.com/aws-cloudformation/cfn-lint/blob/main/src/cfnlint/rules/resources/cloudfront/Aliases.py and ignore if so?\r\n\r\nA workaround would be to use \"!Sub\" which apparently is ignored already (`FUNCTIONS`). Shouldn't we also ignore when `REGEX_DYN_REF`?\r\n\r\n### Expected behavior\r\n\r\nE3013 shouldn't be informed, since there's no way to validate the dynamic-reference value from cfn-lint perspective (?)\r\n\r\n### Reproduction template\r\n\r\n```\r\nCloudFront:\r\n Type: AWS::CloudFront::Distribution\r\n Properties:\r\n DistributionConfig:\r\n Enabled: true\r\n Aliases:\r\n - \"{{resolve:ssm:/env/fqdns/certifier}}\"\r\n DefaultRootObject: index.html\r\n``` \n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport regex as re\n\nfrom cfnlint.helpers import FUNCTIONS\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass Aliases(CloudFormationLintRule):\n \"\"\"Check if CloudFront Aliases are valid domain names\"\"\"\n\n id = \"E3013\"\n shortdesc = \"CloudFront Aliases\"\n description = \"CloudFront aliases should contain valid domain names\"\n source_url = \"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-distributionconfig.html#cfn-cloudfront-distribution-distributionconfig-aliases\"\n tags = [\"properties\", \"cloudfront\"]\n\n def match(self, cfn):\n \"\"\"Check cloudfront Resource Parameters\"\"\"\n\n matches = []\n\n valid_domain = re.compile(\n r\"^(?:[a-z0-9\\*](?:[a-z0-9-]{0,61}[a-z0-9])?\\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$\"\n )\n\n results = cfn.get_resource_properties(\n [\"AWS::CloudFront::Distribution\", \"DistributionConfig\"]\n )\n for result in results:\n aliases = result[\"Value\"].get(\"Aliases\")\n if aliases:\n for alias in aliases:\n if isinstance(alias, str) and alias not in FUNCTIONS:\n wildcard = alias.split(\".\")\n if \"*\" in wildcard[1:]:\n path = result[\"Path\"] + [\"Aliases\"]\n message = f'Invalid use of wildcards: {alias} at {\"/\".join(result[\"Path\"])}'\n matches.append(RuleMatch(path, message))\n if not re.match(valid_domain, alias):\n path = result[\"Path\"] + [\"Aliases\"]\n message = f'Invalid alias found: {alias} at {\"/\".join(result[\"Path\"])}'\n matches.append(RuleMatch(path, message))\n\n return matches\n", "path": "src/cfnlint/rules/resources/cloudfront/Aliases.py"}]}
| 1,316 | 213 |
gh_patches_debug_25570
|
rasdani/github-patches
|
git_diff
|
Zeroto521__my-data-toolkit-649
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ENH: New geoaccessor `toposimplify`
<!--
Thanks for contributing a pull request!
Please follow these standard acronyms to start the commit message:
- ENH: enhancement
- BUG: bug fix
- DOC: documentation
- TYP: type annotations
- TST: addition or modification of tests
- MAINT: maintenance commit (refactoring, typos, etc.)
- BLD: change related to building
- REL: related to releasing
- API: an (incompatible) API change
- DEP: deprecate something, or remove a deprecated object
- DEV: development tool or utility
- REV: revert an earlier commit
- PERF: performance improvement
- BOT: always commit via a bot
- CI: related to CI or CD
- CLN: Code cleanup
-->
- [ ] closes #xxxx
- [x] whatsnew entry
adds a simplify method for multi-geometries that keeps the shared edges
</issue>
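As a rough illustration of what the new accessor is meant to do, the sketch below contrasts per-geometry simplification with topology-aware simplification through `topojson`, which simplifies shared borders only once so neighbouring polygons stay aligned. The dataset and tolerance are illustrative choices, not values prescribed by the PR.
```python
# Illustrative sketch; assumes geopandas and topojson are available.
import geopandas as gpd
import topojson

africa = (
    gpd.read_file(gpd.datasets.get_path("naturalearth_lowres"))
    .query('continent == "Africa"')
)

# Per-geometry simplification: adjacent borders can drift apart.
per_geometry = africa.simplify(1)

# Topology-aware simplification: shared edges are kept coincident.
topo = topojson.Topology(africa, toposimplify=1)
shared_edges = topo.to_gdf(crs=africa.crs)

print(per_geometry.head())
print(shared_edges.head())
```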
<code>
[start of dtoolkit/geoaccessor/geoseries/toposimplify.py]
1 from __future__ import annotations
2
3 from typing import Literal
4
5 import geopandas as gpd
6 from pandas.util._decorators import doc
7
8 from dtoolkit.accessor.dataframe import to_series # noqa: F401
9 from dtoolkit.geoaccessor.register import register_geoseries_method
10
11
12 @register_geoseries_method
13 @doc(klass=":class:`~geopandas.GeoSeries`")
14 def toposimplify(
15 s: gpd.GeoSeries,
16 /,
17 tolerance: float,
18 simplify_algorithm: Literal["dp", "vw"] = "dp",
19 simplify_with: Literal["shapely", "simplification"] = "shapely",
20 prevent_oversimplify: bool = True,
21 ) -> gpd.GeoSeries:
22 """
23 Apply :func:`topojson.Topology.toposimplify` to {klass} to keep **shared edges**.
24
25 Parameters
26 ----------
27 tolerance : float
28 All parts of a simplified geometry will be no more than tolerance distance from
29 the original.
30
31 simplify_algorithm : {{'dp', 'vw'}}, default 'dp'
32 ``vw`` will only be selected if ``simplify_with`` is set to ``simplification``.
33 - ``dp`` : Douglas-Peucker
34 - ``vw`` : Visvalingam-Whyatt
35
36 simplify_with : {{'shapely', 'simplification'}}, default 'shapely'
37 Sets the package to use for simplifying. Shapely adopts solely Douglas-Peucker
38 and simplification both Douglas-Peucker and Visvalingam-Whyatt. The package
39 simplification is known to be quicker than shapely.
40
41 prevent_oversimplify : bool, default True
42 If `True`, the simplification is slower, but the likelihood of producing
43 valid geometries is higher as it prevents oversimplification. Simplification
44 happens on paths separately, so this setting is especially relevant for rings
45 with no partial shared paths. This is also known as a topology-preserving
46 variant of simplification.
47
48 Returns
49 -------
50 {klass}
51
52 Raises
53 ------
54 ModuleNotFoundError
55 - If don't have module named 'topojson'.
56 - If don't have module named 'simplification'.
57
58 See Also
59 --------
60 geopandas.GeoSeries.simplify
61 dtoolkit.geoaccessor.geoseries.toposimplify
62 dtoolkit.geoaccessor.geodataframe.toposimplify
63 topojson.Topology.toposimplify
64 https://mattijn.github.io/topojson/api/topojson.core.topology.html#toposimplify
65
66 Examples
67 --------
68 >>> import dtoolkit.geoaccessor
69 >>> import geopandas as gpd
70 >>> df = (
71 ... gpd.read_file(gpd.datasets.get_path("naturalearth_lowres"))
72 ... .query('continent == "Africa"')
73 ... )
74 >>> df.head()
75 pop_est ... geometry
76 1 58005463.0 ... POLYGON ((33.90371 -0.95000, 34.07262 -1.05982...
77 2 603253.0 ... POLYGON ((-8.66559 27.65643, -8.66512 27.58948...
78 11 86790567.0 ... POLYGON ((29.34000 -4.49998, 29.51999 -5.41998...
79 12 10192317.3 ... POLYGON ((41.58513 -1.68325, 40.99300 -0.85829...
80 13 52573973.0 ... POLYGON ((39.20222 -4.67677, 37.76690 -3.67712...
81 <BLANKLINE>
82 [5 rows x 6 columns]
83 >>> df.toposimplify(0.1).head()
84 pop_est ... geometry
85 1 58005463.0 ... POLYGON ((33.90367 -0.95000, 30.76984 -1.01452...
86 2 603253.0 ... POLYGON ((-8.66561 27.65644, -8.81786 27.65644...
87 11 86790567.0 ... POLYGON ((29.33999 -4.50001, 29.27634 -3.29391...
88 12 10192317.3 ... POLYGON ((41.58509 -1.68324, 42.04156 -0.91918...
89 13 52573973.0 ... POLYGON ((39.20220 -4.67675, 39.60493 -4.34650...
90 <BLANKLINE>
91 [5 rows x 6 columns]
92
93 .. plot::
94
95 import dtoolkit.geoaccessor
96 import geopandas as gpd
97 import matplotlib.pyplot as plt
98
99
100 df = (
101 gpd.read_file(gpd.datasets.get_path("naturalearth_lowres"))
102 .query('continent == "Africa"')
103 )
104
105 fig, (ax1, ax2) = plt.subplots(ncols=2, sharex=True, sharey=True)
106
107 df.simplify(1).plot(ax=ax1, alpha=0.7)
108 df.toposimplify(1).plot(ax=ax2, alpha=0.7)
109
110 ax1.set_title("simplify")
111 ax1.set_axis_off()
112 ax2.set_title("toposimplify")
113 ax2.set_axis_off()
114 fig.tight_layout()
115 plt.show()
116 """
117 from topojson import Topology
118
119 return (
120 Topology(
121 s,
122 toposimplify=tolerance,
123 simplify_algorithm=simplify_algorithm,
124 simplify_with=simplify_with,
125 prevent_oversimplify=prevent_oversimplify,
126 )
127 # `to_gdf` return is a GeoDataFrame, require GeoSeries
128 .to_gdf(crs=s.crs).to_series()
129 # To fix https://github.com/mattijn/topojson/issues/164
130 .set_axis(s.index)
131 )
132
[end of dtoolkit/geoaccessor/geoseries/toposimplify.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/dtoolkit/geoaccessor/geoseries/toposimplify.py b/dtoolkit/geoaccessor/geoseries/toposimplify.py
--- a/dtoolkit/geoaccessor/geoseries/toposimplify.py
+++ b/dtoolkit/geoaccessor/geoseries/toposimplify.py
@@ -20,7 +20,12 @@
prevent_oversimplify: bool = True,
) -> gpd.GeoSeries:
"""
- Apply :func:`topojson.Topology.toposimplify` to {klass} to keep **shared edges**.
+ Returns a {klass} containing a simplified representation of each geometry.
+ Similar to :meth:`~geopandas.GeoSeries.simplify`, but keeps shared edges.
+
+ .. image:: ../../../../_static/simplify-vs-toposimplify.png
+ :width: 80%
+ :align: center
Parameters
----------
@@ -89,30 +94,6 @@
13 52573973.0 ... POLYGON ((39.20220 -4.67675, 39.60493 -4.34650...
<BLANKLINE>
[5 rows x 6 columns]
-
- .. plot::
-
- import dtoolkit.geoaccessor
- import geopandas as gpd
- import matplotlib.pyplot as plt
-
-
- df = (
- gpd.read_file(gpd.datasets.get_path("naturalearth_lowres"))
- .query('continent == "Africa"')
- )
-
- fig, (ax1, ax2) = plt.subplots(ncols=2, sharex=True, sharey=True)
-
- df.simplify(1).plot(ax=ax1, alpha=0.7)
- df.toposimplify(1).plot(ax=ax2, alpha=0.7)
-
- ax1.set_title("simplify")
- ax1.set_axis_off()
- ax2.set_title("toposimplify")
- ax2.set_axis_off()
- fig.tight_layout()
- plt.show()
"""
from topojson import Topology
|
{"golden_diff": "diff --git a/dtoolkit/geoaccessor/geoseries/toposimplify.py b/dtoolkit/geoaccessor/geoseries/toposimplify.py\n--- a/dtoolkit/geoaccessor/geoseries/toposimplify.py\n+++ b/dtoolkit/geoaccessor/geoseries/toposimplify.py\n@@ -20,7 +20,12 @@\n prevent_oversimplify: bool = True,\n ) -> gpd.GeoSeries:\n \"\"\"\n- Apply :func:`topojson.Topology.toposimplify` to {klass} to keep **shared edges**.\n+ Returns a {klass} containing a simplified representation of each geometry.\n+ Similar to :meth:`~geopandas.GeoSeries.simplify`, but keeps shared edges.\n+\n+ .. image:: ../../../../_static/simplify-vs-toposimplify.png\n+ :width: 80%\n+ :align: center\n \n Parameters\n ----------\n@@ -89,30 +94,6 @@\n 13 52573973.0 ... POLYGON ((39.20220 -4.67675, 39.60493 -4.34650...\n <BLANKLINE>\n [5 rows x 6 columns]\n-\n- .. plot::\n-\n- import dtoolkit.geoaccessor\n- import geopandas as gpd\n- import matplotlib.pyplot as plt\n-\n-\n- df = (\n- gpd.read_file(gpd.datasets.get_path(\"naturalearth_lowres\"))\n- .query('continent == \"Africa\"')\n- )\n-\n- fig, (ax1, ax2) = plt.subplots(ncols=2, sharex=True, sharey=True)\n-\n- df.simplify(1).plot(ax=ax1, alpha=0.7)\n- df.toposimplify(1).plot(ax=ax2, alpha=0.7)\n-\n- ax1.set_title(\"simplify\")\n- ax1.set_axis_off()\n- ax2.set_title(\"toposimplify\")\n- ax2.set_axis_off()\n- fig.tight_layout()\n- plt.show()\n \"\"\"\n from topojson import Topology\n", "issue": "EHN: New geoaccessor `toposimiplify`\n<!--\r\nThanks for contributing a pull request!\r\n\r\nPlease follow these standard acronyms to start the commit message:\r\n\r\n- ENH: enhancement\r\n- BUG: bug fix\r\n- DOC: documentation\r\n- TYP: type annotations\r\n- TST: addition or modification of tests\r\n- MAINT: maintenance commit (refactoring, typos, etc.)\r\n- BLD: change related to building\r\n- REL: related to releasing\r\n- API: an (incompatible) API change\r\n- DEP: deprecate something, or remove a deprecated object\r\n- DEV: development tool or utility\r\n- REV: revert an earlier commit\r\n- PERF: performance improvement\r\n- BOT: always commit via a bot\r\n- CI: related to CI or CD\r\n- CLN: Code cleanup\r\n-->\r\n\r\n- [ ] closes #xxxx\r\n- [x] whatsnew entry\r\n\r\nsimplify multi-geometries method but could keep the shared edges\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Literal\n\nimport geopandas as gpd\nfrom pandas.util._decorators import doc\n\nfrom dtoolkit.accessor.dataframe import to_series # noqa: F401\nfrom dtoolkit.geoaccessor.register import register_geoseries_method\n\n\n@register_geoseries_method\n@doc(klass=\":class:`~geopandas.GeoSeries`\")\ndef toposimplify(\n s: gpd.GeoSeries,\n /,\n tolerance: float,\n simplify_algorithm: Literal[\"dp\", \"vw\"] = \"dp\",\n simplify_with: Literal[\"shapely\", \"simplification\"] = \"shapely\",\n prevent_oversimplify: bool = True,\n) -> gpd.GeoSeries:\n \"\"\"\n Apply :func:`topojson.Topology.toposimplify` to {klass} to keep **shared edges**.\n\n Parameters\n ----------\n tolerance : float\n All parts of a simplified geometry will be no more than tolerance distance from\n the original.\n\n simplify_algorithm : {{'dp', 'vw'}}, default 'dp'\n ``vw`` will only be selected if ``simplify_with`` is set to ``simplification``.\n - ``dp`` : Douglas-Peucker\n - ``vw`` : Visvalingam-Whyatt\n\n simplify_with : {{'shapely', 'simplification'}}, default 'shapely'\n Sets the package to use for simplifying. Shapely adopts solely Douglas-Peucker\n and simplification both Douglas-Peucker and Visvalingam-Whyatt. 
The package\n simplification is known to be quicker than shapely.\n\n prevent_oversimplify : bool, default True\n If `True`, the simplification is slower, but the likelihood of producing\n valid geometries is higher as it prevents oversimplification. Simplification\n happens on paths separately, so this setting is especially relevant for rings\n with no partial shared paths. This is also known as a topology-preserving\n variant of simplification.\n\n Returns\n -------\n {klass}\n\n Raises\n ------\n ModuleNotFoundError\n - If don't have module named 'topojson'.\n - If don't have module named 'simplification'.\n\n See Also\n --------\n geopandas.GeoSeries.simplify\n dtoolkit.geoaccessor.geoseries.toposimplify\n dtoolkit.geoaccessor.geodataframe.toposimplify\n topojson.Topology.toposimplify\n https://mattijn.github.io/topojson/api/topojson.core.topology.html#toposimplify\n\n Examples\n --------\n >>> import dtoolkit.geoaccessor\n >>> import geopandas as gpd\n >>> df = (\n ... gpd.read_file(gpd.datasets.get_path(\"naturalearth_lowres\"))\n ... .query('continent == \"Africa\"')\n ... )\n >>> df.head()\n pop_est ... geometry\n 1 58005463.0 ... POLYGON ((33.90371 -0.95000, 34.07262 -1.05982...\n 2 603253.0 ... POLYGON ((-8.66559 27.65643, -8.66512 27.58948...\n 11 86790567.0 ... POLYGON ((29.34000 -4.49998, 29.51999 -5.41998...\n 12 10192317.3 ... POLYGON ((41.58513 -1.68325, 40.99300 -0.85829...\n 13 52573973.0 ... POLYGON ((39.20222 -4.67677, 37.76690 -3.67712...\n <BLANKLINE>\n [5 rows x 6 columns]\n >>> df.toposimplify(0.1).head()\n pop_est ... geometry\n 1 58005463.0 ... POLYGON ((33.90367 -0.95000, 30.76984 -1.01452...\n 2 603253.0 ... POLYGON ((-8.66561 27.65644, -8.81786 27.65644...\n 11 86790567.0 ... POLYGON ((29.33999 -4.50001, 29.27634 -3.29391...\n 12 10192317.3 ... POLYGON ((41.58509 -1.68324, 42.04156 -0.91918...\n 13 52573973.0 ... POLYGON ((39.20220 -4.67675, 39.60493 -4.34650...\n <BLANKLINE>\n [5 rows x 6 columns]\n\n .. plot::\n\n import dtoolkit.geoaccessor\n import geopandas as gpd\n import matplotlib.pyplot as plt\n\n\n df = (\n gpd.read_file(gpd.datasets.get_path(\"naturalearth_lowres\"))\n .query('continent == \"Africa\"')\n )\n\n fig, (ax1, ax2) = plt.subplots(ncols=2, sharex=True, sharey=True)\n\n df.simplify(1).plot(ax=ax1, alpha=0.7)\n df.toposimplify(1).plot(ax=ax2, alpha=0.7)\n\n ax1.set_title(\"simplify\")\n ax1.set_axis_off()\n ax2.set_title(\"toposimplify\")\n ax2.set_axis_off()\n fig.tight_layout()\n plt.show()\n \"\"\"\n from topojson import Topology\n\n return (\n Topology(\n s,\n toposimplify=tolerance,\n simplify_algorithm=simplify_algorithm,\n simplify_with=simplify_with,\n prevent_oversimplify=prevent_oversimplify,\n )\n # `to_gdf` return is a GeoDataFrame, require GeoSeries\n .to_gdf(crs=s.crs).to_series()\n # To fix https://github.com/mattijn/topojson/issues/164\n .set_axis(s.index)\n )\n", "path": "dtoolkit/geoaccessor/geoseries/toposimplify.py"}]}
| 2,609 | 489 |
gh_patches_debug_13562
|
rasdani/github-patches
|
git_diff
|
projectmesa__mesa-1984
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
JupyterViz space view limits
**What's the problem this feature will solve?**
At the moment, when drawing spaces with JupyterViz, the axes limits are left to be automatically determined by Matplotlib. Would it not make more sense (in particular for continuous spaces) to derive the x and y limits from the space itself?
**Describe the solution you'd like**
The simplest fix would be to adapt `_draw_grid` to use `space.width` and `space.height` while `_draw_continuous_space` uses `space.x_min`, `space.x_max`, `space.y_min`, and `space.y_max`. Are there reasons where the user might want to rely on matplotlib's automatic determination of the bounds? If so, then a slightly more sophisticated solution would be needed where kwargs are passed from jupyterviz to the underlying space_drawer function. So you would add a `space_drawer_kwargs` keyword argument.
</issue>
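For illustration, a hypothetical helper along the lines of the "simplest fix" described above: derive the axes limits from the space object rather than letting Matplotlib choose them. The function name and the padding fraction are assumptions for the sketch, and it only covers grid-like and continuous spaces.
```python
# Hypothetical sketch, not the final JupyterViz implementation.
def set_limits_from_space(space_ax, space):
    """Pin the Matplotlib axes to the extent of the model's space."""
    if hasattr(space, "x_min"):  # ContinuousSpace exposes x_min/x_max/y_min/y_max
        pad_x = (space.x_max - space.x_min) / 20  # small margin, arbitrary choice
        pad_y = (space.y_max - space.y_min) / 20
        space_ax.set_xlim(space.x_min - pad_x, space.x_max + pad_x)
        space_ax.set_ylim(space.y_min - pad_y, space.y_max + pad_y)
    else:  # grid-like spaces expose width/height in cells
        space_ax.set_xlim(-1, space.width)
        space_ax.set_ylim(-1, space.height)
```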
<code>
[start of mesa/experimental/components/matplotlib.py]
1 from typing import Optional
2
3 import networkx as nx
4 import solara
5 from matplotlib.figure import Figure
6 from matplotlib.ticker import MaxNLocator
7
8 import mesa
9
10
11 @solara.component
12 def SpaceMatplotlib(model, agent_portrayal, dependencies: Optional[list[any]] = None):
13 space_fig = Figure()
14 space_ax = space_fig.subplots()
15 space = getattr(model, "grid", None)
16 if space is None:
17 # Sometimes the space is defined as model.space instead of model.grid
18 space = model.space
19 if isinstance(space, mesa.space.NetworkGrid):
20 _draw_network_grid(space, space_ax, agent_portrayal)
21 elif isinstance(space, mesa.space.ContinuousSpace):
22 _draw_continuous_space(space, space_ax, agent_portrayal)
23 else:
24 _draw_grid(space, space_ax, agent_portrayal)
25 space_ax.set_axis_off()
26 solara.FigureMatplotlib(space_fig, format="png", dependencies=dependencies)
27
28
29 def _draw_grid(space, space_ax, agent_portrayal):
30 def portray(g):
31 x = []
32 y = []
33 s = [] # size
34 c = [] # color
35 for i in range(g.width):
36 for j in range(g.height):
37 content = g._grid[i][j]
38 if not content:
39 continue
40 if not hasattr(content, "__iter__"):
41 # Is a single grid
42 content = [content]
43 for agent in content:
44 data = agent_portrayal(agent)
45 x.append(i)
46 y.append(j)
47 if "size" in data:
48 s.append(data["size"])
49 if "color" in data:
50 c.append(data["color"])
51 out = {"x": x, "y": y}
52 if len(s) > 0:
53 out["s"] = s
54 if len(c) > 0:
55 out["c"] = c
56 return out
57
58 space_ax.scatter(**portray(space))
59
60
61 def _draw_network_grid(space, space_ax, agent_portrayal):
62 graph = space.G
63 pos = nx.spring_layout(graph, seed=0)
64 nx.draw(
65 graph,
66 ax=space_ax,
67 pos=pos,
68 **agent_portrayal(graph),
69 )
70
71
72 def _draw_continuous_space(space, space_ax, agent_portrayal):
73 def portray(space):
74 x = []
75 y = []
76 s = [] # size
77 c = [] # color
78 for agent in space._agent_to_index:
79 data = agent_portrayal(agent)
80 _x, _y = agent.pos
81 x.append(_x)
82 y.append(_y)
83 if "size" in data:
84 s.append(data["size"])
85 if "color" in data:
86 c.append(data["color"])
87 out = {"x": x, "y": y}
88 if len(s) > 0:
89 out["s"] = s
90 if len(c) > 0:
91 out["c"] = c
92 return out
93
94 space_ax.scatter(**portray(space))
95
96
97 def make_plot(model, measure):
98 fig = Figure()
99 ax = fig.subplots()
100 df = model.datacollector.get_model_vars_dataframe()
101 if isinstance(measure, str):
102 ax.plot(df.loc[:, measure])
103 ax.set_ylabel(measure)
104 elif isinstance(measure, dict):
105 for m, color in measure.items():
106 ax.plot(df.loc[:, m], label=m, color=color)
107 fig.legend()
108 elif isinstance(measure, (list, tuple)):
109 for m in measure:
110 ax.plot(df.loc[:, m], label=m)
111 fig.legend()
112 # Set integer x axis
113 ax.xaxis.set_major_locator(MaxNLocator(integer=True))
114 solara.FigureMatplotlib(fig)
115
[end of mesa/experimental/components/matplotlib.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mesa/experimental/components/matplotlib.py b/mesa/experimental/components/matplotlib.py
--- a/mesa/experimental/components/matplotlib.py
+++ b/mesa/experimental/components/matplotlib.py
@@ -55,6 +55,8 @@
out["c"] = c
return out
+ space_ax.set_xlim(-1, space.width)
+ space_ax.set_ylim(-1, space.height)
space_ax.scatter(**portray(space))
@@ -91,6 +93,14 @@
out["c"] = c
return out
+ width = space.x_max - space.x_min
+ x_padding = width / 20
+ height = space.y_max - space.y_min
+ y_padding = height / 20
+ space_ax.set_xlim(space.x_min - x_padding, space.x_max + x_padding)
+ space_ax.set_ylim(space.y_min - y_padding, space.y_max + y_padding)
+ space_ax.scatter(**portray(space))
+
space_ax.scatter(**portray(space))
|
{"golden_diff": "diff --git a/mesa/experimental/components/matplotlib.py b/mesa/experimental/components/matplotlib.py\n--- a/mesa/experimental/components/matplotlib.py\n+++ b/mesa/experimental/components/matplotlib.py\n@@ -55,6 +55,8 @@\n out[\"c\"] = c\n return out\n \n+ space_ax.set_xlim(-1, space.width)\n+ space_ax.set_ylim(-1, space.height)\n space_ax.scatter(**portray(space))\n \n \n@@ -91,6 +93,14 @@\n out[\"c\"] = c\n return out\n \n+ width = space.x_max - space.x_min\n+ x_padding = width / 20\n+ height = space.y_max - space.y_min\n+ y_padding = height / 20\n+ space_ax.set_xlim(space.x_min - x_padding, space.x_max + x_padding)\n+ space_ax.set_ylim(space.y_min - y_padding, space.y_max + y_padding)\n+ space_ax.scatter(**portray(space))\n+\n space_ax.scatter(**portray(space))\n", "issue": "JupyterViz space view limits\n**What's the problem this feature will solve?**\r\nAt the moment, when drawing spaces with JupyterViz, the axes limits are left to be automatically determined by Matplotlib. Would it not make more sense (in particular of continuous spaces, to derive xlim and xmax from the space itself?\r\n\r\n**Describe the solution you'd like**\r\nThe simplest fix would be to adapt `_draw_grid` to use `space.width` and `space.height` while `_draw_continuos_space` uses `space.x_min`, `space.x_max`, `space.y_min`, and `space.y_max`. Are there reasons where the user might want to rely on matpltolib's automatic determination of the bounds? If so, then a slightly more sophisticated solution would be needed where kwargs are passed to from jupyterviz to the underlying space_drawer function. So you would add a `space_drawer_kwargs` keyword argument.\r\n\r\n\n", "before_files": [{"content": "from typing import Optional\n\nimport networkx as nx\nimport solara\nfrom matplotlib.figure import Figure\nfrom matplotlib.ticker import MaxNLocator\n\nimport mesa\n\n\[email protected]\ndef SpaceMatplotlib(model, agent_portrayal, dependencies: Optional[list[any]] = None):\n space_fig = Figure()\n space_ax = space_fig.subplots()\n space = getattr(model, \"grid\", None)\n if space is None:\n # Sometimes the space is defined as model.space instead of model.grid\n space = model.space\n if isinstance(space, mesa.space.NetworkGrid):\n _draw_network_grid(space, space_ax, agent_portrayal)\n elif isinstance(space, mesa.space.ContinuousSpace):\n _draw_continuous_space(space, space_ax, agent_portrayal)\n else:\n _draw_grid(space, space_ax, agent_portrayal)\n space_ax.set_axis_off()\n solara.FigureMatplotlib(space_fig, format=\"png\", dependencies=dependencies)\n\n\ndef _draw_grid(space, space_ax, agent_portrayal):\n def portray(g):\n x = []\n y = []\n s = [] # size\n c = [] # color\n for i in range(g.width):\n for j in range(g.height):\n content = g._grid[i][j]\n if not content:\n continue\n if not hasattr(content, \"__iter__\"):\n # Is a single grid\n content = [content]\n for agent in content:\n data = agent_portrayal(agent)\n x.append(i)\n y.append(j)\n if \"size\" in data:\n s.append(data[\"size\"])\n if \"color\" in data:\n c.append(data[\"color\"])\n out = {\"x\": x, \"y\": y}\n if len(s) > 0:\n out[\"s\"] = s\n if len(c) > 0:\n out[\"c\"] = c\n return out\n\n space_ax.scatter(**portray(space))\n\n\ndef _draw_network_grid(space, space_ax, agent_portrayal):\n graph = space.G\n pos = nx.spring_layout(graph, seed=0)\n nx.draw(\n graph,\n ax=space_ax,\n pos=pos,\n **agent_portrayal(graph),\n )\n\n\ndef _draw_continuous_space(space, space_ax, agent_portrayal):\n def portray(space):\n x = []\n y = []\n s = [] # size\n 
c = [] # color\n for agent in space._agent_to_index:\n data = agent_portrayal(agent)\n _x, _y = agent.pos\n x.append(_x)\n y.append(_y)\n if \"size\" in data:\n s.append(data[\"size\"])\n if \"color\" in data:\n c.append(data[\"color\"])\n out = {\"x\": x, \"y\": y}\n if len(s) > 0:\n out[\"s\"] = s\n if len(c) > 0:\n out[\"c\"] = c\n return out\n\n space_ax.scatter(**portray(space))\n\n\ndef make_plot(model, measure):\n fig = Figure()\n ax = fig.subplots()\n df = model.datacollector.get_model_vars_dataframe()\n if isinstance(measure, str):\n ax.plot(df.loc[:, measure])\n ax.set_ylabel(measure)\n elif isinstance(measure, dict):\n for m, color in measure.items():\n ax.plot(df.loc[:, m], label=m, color=color)\n fig.legend()\n elif isinstance(measure, (list, tuple)):\n for m in measure:\n ax.plot(df.loc[:, m], label=m)\n fig.legend()\n # Set integer x axis\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n solara.FigureMatplotlib(fig)\n", "path": "mesa/experimental/components/matplotlib.py"}]}
| 1,784 | 233 |
gh_patches_debug_39815
|
rasdani/github-patches
|
git_diff
|
nilearn__nilearn-773
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Misleading (wrong?) code in Oasis VBM example
As pointed out on http://neurostars.org/p/3388/, the Oasis VBM code applies the NiftiMasker twice, and thus ends up smoothing the data twice.
It does that to screen out low-variance features. I think the right way to do this would be to use a sklearn.feature_selection.VarianceThreshold in the pipeline.
</issue>
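A minimal sketch of the suggested restructuring: mask (and smooth) once, then screen low-variance voxels with `VarianceThreshold` as the first step of the scikit-learn pipeline. The threshold and `k` values below are illustrative placeholders.
```python
# Illustrative pipeline; parameter values are placeholders.
from sklearn.feature_selection import SelectKBest, VarianceThreshold, f_regression
from sklearn.pipeline import Pipeline
from sklearn.svm import SVR

anova_svr = Pipeline([
    ("variance_threshold", VarianceThreshold(threshold=0.01)),
    ("anova", SelectKBest(f_regression, k=2000)),
    ("svr", SVR(kernel="linear")),
])

# gm_maps_masked would come from a single nifti_masker.fit_transform(...) call,
# so the data are smoothed only once.
# anova_svr.fit(gm_maps_masked, age)
```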
<code>
[start of examples/decoding/plot_oasis_vbm.py]
1 """
2 Voxel-Based Morphometry on Oasis dataset
3 ========================================
4
5 This example uses Voxel-Based Morphometry (VBM) to study the relationship
6 between aging and gray matter density.
7
8 The data come from the `OASIS <http://www.oasis-brains.org/>`_ project.
9 If you use it, you need to agree with the data usage agreement available
10 on the website.
11
12 It has been run through a standard VBM pipeline (using SPM8 and
13 NewSegment) to create VBM maps, which we study here.
14
15 Predictive modeling analysis: VBM bio-markers of aging?
16 --------------------------------------------------------
17
18 We run a standard SVM-ANOVA nilearn pipeline to predict age from the VBM
19 data. We use only 100 subjects from the OASIS dataset to limit the memory
20 usage.
21
22 Note that for an actual predictive modeling study of aging, the study
23 should be ran on the full set of subjects. Also, parameters such as the
24 smoothing applied to the data and the number of features selected by the
25 Anova step should be set by nested cross-validation, as they impact
26 significantly the prediction score.
27
28 Brain mapping with mass univariate
29 -----------------------------------
30
31 SVM weights are very noisy, partly because heavy smoothing is detrimental
32 for the prediction here. A standard analysis using mass-univariate GLM
33 (here permuted to have exact correction for multiple comparisons) gives a
34 much clearer view of the important regions.
35
36 ____
37
38 """
39 # Authors: Elvis Dhomatob, <[email protected]>, Apr. 2014
40 # Virgile Fritsch, <[email protected]>, Apr 2014
41 # Gael Varoquaux, Apr 2014
42 import numpy as np
43 from scipy import linalg
44 import matplotlib.pyplot as plt
45 from nilearn import datasets
46 from nilearn.input_data import NiftiMasker
47
48 n_subjects = 100 # more subjects requires more memory
49
50 ### Load Oasis dataset ########################################################
51 oasis_dataset = datasets.fetch_oasis_vbm(n_subjects=n_subjects)
52 gray_matter_map_filenames = oasis_dataset.gray_matter_maps
53 age = oasis_dataset.ext_vars['age'].astype(float)
54
55 # print basic information on the dataset
56 print('First gray-matter anatomy image (3D) is located at: %s' %
57 oasis_dataset.gray_matter_maps[0]) # 3D data
58 print('First white-matter anatomy image (3D) is located at: %s' %
59 oasis_dataset.white_matter_maps[0]) # 3D data
60
61 ### Preprocess data ###########################################################
62 nifti_masker = NiftiMasker(
63 standardize=False,
64 smoothing_fwhm=2,
65 memory='nilearn_cache') # cache options
66 # remove features with too low between-subject variance
67 gm_maps_masked = nifti_masker.fit_transform(gray_matter_map_filenames)
68 gm_maps_masked[:, gm_maps_masked.var(0) < 0.01] = 0.
69 # final masking
70 new_images = nifti_masker.inverse_transform(gm_maps_masked)
71 gm_maps_masked = nifti_masker.fit_transform(new_images)
72 n_samples, n_features = gm_maps_masked.shape
73 print("%d samples, %d features" % (n_subjects, n_features))
74
75 ### Prediction with SVR #######################################################
76 print("ANOVA + SVR")
77 # Define the prediction function to be used.
78 # Here we use a Support Vector Classification, with a linear kernel
79 from sklearn.svm import SVR
80 svr = SVR(kernel='linear')
81
82 # Dimension reduction
83 from sklearn.feature_selection import SelectKBest, f_regression
84
85 # Here we use a classical univariate feature selection based on F-test,
86 # namely Anova.
87 feature_selection = SelectKBest(f_regression, k=2000)
88
89 # We have our predictor (SVR), our feature selection (SelectKBest), and now,
90 # we can plug them together in a *pipeline* that performs the two operations
91 # successively:
92 from sklearn.pipeline import Pipeline
93 anova_svr = Pipeline([('anova', feature_selection), ('svr', svr)])
94
95 ### Fit and predict
96 anova_svr.fit(gm_maps_masked, age)
97 age_pred = anova_svr.predict(gm_maps_masked)
98
99 # Visualization
100 # Look at the SVR's discriminating weights
101 coef = svr.coef_
102 # reverse feature selection
103 coef = feature_selection.inverse_transform(coef)
104 # reverse masking
105 weight_img = nifti_masker.inverse_transform(coef)
106
107 # Create the figure
108 from nilearn.plotting import plot_stat_map, show
109 bg_filename = gray_matter_map_filenames[0]
110 z_slice = 0
111 from nilearn.image.resampling import coord_transform
112 affine = weight_img.get_affine()
113 _, _, k_slice = coord_transform(0, 0, z_slice,
114 linalg.inv(affine))
115 k_slice = np.round(k_slice)
116
117 fig = plt.figure(figsize=(5.5, 7.5), facecolor='k')
118 weight_slice_data = weight_img.get_data()[..., k_slice, 0]
119 vmax = max(-np.min(weight_slice_data), np.max(weight_slice_data)) * 0.5
120 display = plot_stat_map(weight_img, bg_img=bg_filename,
121 display_mode='z', cut_coords=[z_slice],
122 figure=fig, vmax=vmax)
123 display.title('SVM weights', y=1.2)
124
125 # Measure accuracy with cross validation
126 from sklearn.cross_validation import cross_val_score
127 cv_scores = cross_val_score(anova_svr, gm_maps_masked, age)
128
129 # Return the corresponding mean prediction accuracy
130 prediction_accuracy = np.mean(cv_scores)
131 print("=== ANOVA ===")
132 print("Prediction accuracy: %f" % prediction_accuracy)
133 print("")
134
135 ### Inference with massively univariate model #################################
136 print("Massively univariate model")
137
138 # Statistical inference
139 from nilearn.mass_univariate import permuted_ols
140 neg_log_pvals, t_scores_original_data, _ = permuted_ols(
141 age, gm_maps_masked, # + intercept as a covariate by default
142 n_perm=1000, # 1,000 in the interest of time; 10000 would be better
143 n_jobs=1) # can be changed to use more CPUs
144 signed_neg_log_pvals = neg_log_pvals * np.sign(t_scores_original_data)
145 signed_neg_log_pvals_unmasked = nifti_masker.inverse_transform(
146 signed_neg_log_pvals)
147
148 # Show results
149 threshold = -np.log10(0.1) # 10% corrected
150
151 fig = plt.figure(figsize=(5.5, 7.5), facecolor='k')
152
153 display = plot_stat_map(signed_neg_log_pvals_unmasked, bg_img=bg_filename,
154 threshold=threshold, cmap=plt.cm.RdBu_r,
155 display_mode='z', cut_coords=[z_slice],
156 figure=fig)
157 title = ('Negative $\log_{10}$ p-values'
158 '\n(Non-parametric + max-type correction)')
159 display.title(title, y=1.2)
160
161 signed_neg_log_pvals_slice_data = \
162 signed_neg_log_pvals_unmasked.get_data()[..., k_slice, 0]
163 n_detections = (np.abs(signed_neg_log_pvals_slice_data) > threshold).sum()
164 print('\n%d detections' % n_detections)
165
166 show()
167
[end of examples/decoding/plot_oasis_vbm.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/decoding/plot_oasis_vbm.py b/examples/decoding/plot_oasis_vbm.py
--- a/examples/decoding/plot_oasis_vbm.py
+++ b/examples/decoding/plot_oasis_vbm.py
@@ -63,12 +63,7 @@
standardize=False,
smoothing_fwhm=2,
memory='nilearn_cache') # cache options
-# remove features with too low between-subject variance
gm_maps_masked = nifti_masker.fit_transform(gray_matter_map_filenames)
-gm_maps_masked[:, gm_maps_masked.var(0) < 0.01] = 0.
-# final masking
-new_images = nifti_masker.inverse_transform(gm_maps_masked)
-gm_maps_masked = nifti_masker.fit_transform(new_images)
n_samples, n_features = gm_maps_masked.shape
print("%d samples, %d features" % (n_subjects, n_features))
@@ -80,7 +75,11 @@
svr = SVR(kernel='linear')
# Dimension reduction
-from sklearn.feature_selection import SelectKBest, f_regression
+from sklearn.feature_selection import VarianceThreshold, SelectKBest, \
+ f_regression
+
+# Remove features with too low between-subject variance
+variance_threshold = VarianceThreshold(threshold=.01)
# Here we use a classical univariate feature selection based on F-test,
# namely Anova.
@@ -90,7 +89,10 @@
# we can plug them together in a *pipeline* that performs the two operations
# successively:
from sklearn.pipeline import Pipeline
-anova_svr = Pipeline([('anova', feature_selection), ('svr', svr)])
+anova_svr = Pipeline([
+ ('variance_threshold', variance_threshold),
+ ('anova', feature_selection),
+ ('svr', svr)])
### Fit and predict
anova_svr.fit(gm_maps_masked, age)
@@ -101,6 +103,8 @@
coef = svr.coef_
# reverse feature selection
coef = feature_selection.inverse_transform(coef)
+# reverse variance threshold
+coef = variance_threshold.inverse_transform(coef)
# reverse masking
weight_img = nifti_masker.inverse_transform(coef)
@@ -137,13 +141,14 @@
# Statistical inference
from nilearn.mass_univariate import permuted_ols
+data = variance_threshold.fit_transform(gm_maps_masked)
neg_log_pvals, t_scores_original_data, _ = permuted_ols(
- age, gm_maps_masked, # + intercept as a covariate by default
- n_perm=1000, # 1,000 in the interest of time; 10000 would be better
+ age, data, # + intercept as a covariate by default
+ n_perm=2000, # 1,000 in the interest of time; 10000 would be better
n_jobs=1) # can be changed to use more CPUs
signed_neg_log_pvals = neg_log_pvals * np.sign(t_scores_original_data)
signed_neg_log_pvals_unmasked = nifti_masker.inverse_transform(
- signed_neg_log_pvals)
+ variance_threshold.inverse_transform(signed_neg_log_pvals))
# Show results
threshold = -np.log10(0.1) # 10% corrected
|
{"golden_diff": "diff --git a/examples/decoding/plot_oasis_vbm.py b/examples/decoding/plot_oasis_vbm.py\n--- a/examples/decoding/plot_oasis_vbm.py\n+++ b/examples/decoding/plot_oasis_vbm.py\n@@ -63,12 +63,7 @@\n standardize=False,\n smoothing_fwhm=2,\n memory='nilearn_cache') # cache options\n-# remove features with too low between-subject variance\n gm_maps_masked = nifti_masker.fit_transform(gray_matter_map_filenames)\n-gm_maps_masked[:, gm_maps_masked.var(0) < 0.01] = 0.\n-# final masking\n-new_images = nifti_masker.inverse_transform(gm_maps_masked)\n-gm_maps_masked = nifti_masker.fit_transform(new_images)\n n_samples, n_features = gm_maps_masked.shape\n print(\"%d samples, %d features\" % (n_subjects, n_features))\n \n@@ -80,7 +75,11 @@\n svr = SVR(kernel='linear')\n \n # Dimension reduction\n-from sklearn.feature_selection import SelectKBest, f_regression\n+from sklearn.feature_selection import VarianceThreshold, SelectKBest, \\\n+ f_regression\n+\n+# Remove features with too low between-subject variance\n+variance_threshold = VarianceThreshold(threshold=.01)\n \n # Here we use a classical univariate feature selection based on F-test,\n # namely Anova.\n@@ -90,7 +89,10 @@\n # we can plug them together in a *pipeline* that performs the two operations\n # successively:\n from sklearn.pipeline import Pipeline\n-anova_svr = Pipeline([('anova', feature_selection), ('svr', svr)])\n+anova_svr = Pipeline([\n+ ('variance_threshold', variance_threshold),\n+ ('anova', feature_selection),\n+ ('svr', svr)])\n \n ### Fit and predict\n anova_svr.fit(gm_maps_masked, age)\n@@ -101,6 +103,8 @@\n coef = svr.coef_\n # reverse feature selection\n coef = feature_selection.inverse_transform(coef)\n+# reverse variance threshold\n+coef = variance_threshold.inverse_transform(coef)\n # reverse masking\n weight_img = nifti_masker.inverse_transform(coef)\n \n@@ -137,13 +141,14 @@\n \n # Statistical inference\n from nilearn.mass_univariate import permuted_ols\n+data = variance_threshold.fit_transform(gm_maps_masked)\n neg_log_pvals, t_scores_original_data, _ = permuted_ols(\n- age, gm_maps_masked, # + intercept as a covariate by default\n- n_perm=1000, # 1,000 in the interest of time; 10000 would be better\n+ age, data, # + intercept as a covariate by default\n+ n_perm=2000, # 1,000 in the interest of time; 10000 would be better\n n_jobs=1) # can be changed to use more CPUs\n signed_neg_log_pvals = neg_log_pvals * np.sign(t_scores_original_data)\n signed_neg_log_pvals_unmasked = nifti_masker.inverse_transform(\n- signed_neg_log_pvals)\n+ variance_threshold.inverse_transform(signed_neg_log_pvals))\n \n # Show results\n threshold = -np.log10(0.1) # 10% corrected\n", "issue": "Misleading (wrong?) code in Oasis VBM example\nAs pointed out on http://neurostars.org/p/3388/, the Oasis VBM code applies the NiftiMasker twice, and thus ends up smoothing the data twice.\n\nIt does that to do screening of low variance features. 
I think that the right way to do this would be to use a sklearn.feature_selection.VarianceThreshold in the pipeline.\n\n", "before_files": [{"content": "\"\"\"\nVoxel-Based Morphometry on Oasis dataset\n========================================\n\nThis example uses Voxel-Based Morphometry (VBM) to study the relationship\nbetween aging and gray matter density.\n\nThe data come from the `OASIS <http://www.oasis-brains.org/>`_ project.\nIf you use it, you need to agree with the data usage agreement available\non the website.\n\nIt has been run through a standard VBM pipeline (using SPM8 and\nNewSegment) to create VBM maps, which we study here.\n\nPredictive modeling analysis: VBM bio-markers of aging?\n--------------------------------------------------------\n\nWe run a standard SVM-ANOVA nilearn pipeline to predict age from the VBM\ndata. We use only 100 subjects from the OASIS dataset to limit the memory\nusage.\n\nNote that for an actual predictive modeling study of aging, the study\nshould be ran on the full set of subjects. Also, parameters such as the\nsmoothing applied to the data and the number of features selected by the\nAnova step should be set by nested cross-validation, as they impact\nsignificantly the prediction score.\n\nBrain mapping with mass univariate\n-----------------------------------\n\nSVM weights are very noisy, partly because heavy smoothing is detrimental\nfor the prediction here. A standard analysis using mass-univariate GLM\n(here permuted to have exact correction for multiple comparisons) gives a\nmuch clearer view of the important regions.\n\n____\n\n\"\"\"\n# Authors: Elvis Dhomatob, <[email protected]>, Apr. 2014\n# Virgile Fritsch, <[email protected]>, Apr 2014\n# Gael Varoquaux, Apr 2014\nimport numpy as np\nfrom scipy import linalg\nimport matplotlib.pyplot as plt\nfrom nilearn import datasets\nfrom nilearn.input_data import NiftiMasker\n\nn_subjects = 100 # more subjects requires more memory\n\n### Load Oasis dataset ########################################################\noasis_dataset = datasets.fetch_oasis_vbm(n_subjects=n_subjects)\ngray_matter_map_filenames = oasis_dataset.gray_matter_maps\nage = oasis_dataset.ext_vars['age'].astype(float)\n\n# print basic information on the dataset\nprint('First gray-matter anatomy image (3D) is located at: %s' %\n oasis_dataset.gray_matter_maps[0]) # 3D data\nprint('First white-matter anatomy image (3D) is located at: %s' %\n oasis_dataset.white_matter_maps[0]) # 3D data\n\n### Preprocess data ###########################################################\nnifti_masker = NiftiMasker(\n standardize=False,\n smoothing_fwhm=2,\n memory='nilearn_cache') # cache options\n# remove features with too low between-subject variance\ngm_maps_masked = nifti_masker.fit_transform(gray_matter_map_filenames)\ngm_maps_masked[:, gm_maps_masked.var(0) < 0.01] = 0.\n# final masking\nnew_images = nifti_masker.inverse_transform(gm_maps_masked)\ngm_maps_masked = nifti_masker.fit_transform(new_images)\nn_samples, n_features = gm_maps_masked.shape\nprint(\"%d samples, %d features\" % (n_subjects, n_features))\n\n### Prediction with SVR #######################################################\nprint(\"ANOVA + SVR\")\n# Define the prediction function to be used.\n# Here we use a Support Vector Classification, with a linear kernel\nfrom sklearn.svm import SVR\nsvr = SVR(kernel='linear')\n\n# Dimension reduction\nfrom sklearn.feature_selection import SelectKBest, f_regression\n\n# Here we use a classical univariate feature selection based on 
F-test,\n# namely Anova.\nfeature_selection = SelectKBest(f_regression, k=2000)\n\n# We have our predictor (SVR), our feature selection (SelectKBest), and now,\n# we can plug them together in a *pipeline* that performs the two operations\n# successively:\nfrom sklearn.pipeline import Pipeline\nanova_svr = Pipeline([('anova', feature_selection), ('svr', svr)])\n\n### Fit and predict\nanova_svr.fit(gm_maps_masked, age)\nage_pred = anova_svr.predict(gm_maps_masked)\n\n# Visualization\n# Look at the SVR's discriminating weights\ncoef = svr.coef_\n# reverse feature selection\ncoef = feature_selection.inverse_transform(coef)\n# reverse masking\nweight_img = nifti_masker.inverse_transform(coef)\n\n# Create the figure\nfrom nilearn.plotting import plot_stat_map, show\nbg_filename = gray_matter_map_filenames[0]\nz_slice = 0\nfrom nilearn.image.resampling import coord_transform\naffine = weight_img.get_affine()\n_, _, k_slice = coord_transform(0, 0, z_slice,\n linalg.inv(affine))\nk_slice = np.round(k_slice)\n\nfig = plt.figure(figsize=(5.5, 7.5), facecolor='k')\nweight_slice_data = weight_img.get_data()[..., k_slice, 0]\nvmax = max(-np.min(weight_slice_data), np.max(weight_slice_data)) * 0.5\ndisplay = plot_stat_map(weight_img, bg_img=bg_filename,\n display_mode='z', cut_coords=[z_slice],\n figure=fig, vmax=vmax)\ndisplay.title('SVM weights', y=1.2)\n\n# Measure accuracy with cross validation\nfrom sklearn.cross_validation import cross_val_score\ncv_scores = cross_val_score(anova_svr, gm_maps_masked, age)\n\n# Return the corresponding mean prediction accuracy\nprediction_accuracy = np.mean(cv_scores)\nprint(\"=== ANOVA ===\")\nprint(\"Prediction accuracy: %f\" % prediction_accuracy)\nprint(\"\")\n\n### Inference with massively univariate model #################################\nprint(\"Massively univariate model\")\n\n# Statistical inference\nfrom nilearn.mass_univariate import permuted_ols\nneg_log_pvals, t_scores_original_data, _ = permuted_ols(\n age, gm_maps_masked, # + intercept as a covariate by default\n n_perm=1000, # 1,000 in the interest of time; 10000 would be better\n n_jobs=1) # can be changed to use more CPUs\nsigned_neg_log_pvals = neg_log_pvals * np.sign(t_scores_original_data)\nsigned_neg_log_pvals_unmasked = nifti_masker.inverse_transform(\n signed_neg_log_pvals)\n\n# Show results\nthreshold = -np.log10(0.1) # 10% corrected\n\nfig = plt.figure(figsize=(5.5, 7.5), facecolor='k')\n\ndisplay = plot_stat_map(signed_neg_log_pvals_unmasked, bg_img=bg_filename,\n threshold=threshold, cmap=plt.cm.RdBu_r,\n display_mode='z', cut_coords=[z_slice],\n figure=fig)\ntitle = ('Negative $\\log_{10}$ p-values'\n '\\n(Non-parametric + max-type correction)')\ndisplay.title(title, y=1.2)\n\nsigned_neg_log_pvals_slice_data = \\\n signed_neg_log_pvals_unmasked.get_data()[..., k_slice, 0]\nn_detections = (np.abs(signed_neg_log_pvals_slice_data) > threshold).sum()\nprint('\\n%d detections' % n_detections)\n\nshow()\n", "path": "examples/decoding/plot_oasis_vbm.py"}]}
| 2,644 | 769 |
gh_patches_debug_27391
|
rasdani/github-patches
|
git_diff
|
jupyterhub__jupyterhub-250
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Admin panel - Add ability to import multiple users at once
It would be nice to be able to add many users at once, as a newline-delimited list of usernames.
</issue>
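As a small illustration of the request, here is a sketch of how a newline-delimited textarea value could be turned into the payload for a bulk user-creation call. The helper name and the payload shape are assumptions, not JupyterHub's actual API.
```python
# Hypothetical sketch; the admin-panel endpoint and payload shape are assumptions.
def parse_usernames(raw: str) -> list:
    """Split a newline-delimited textarea value into unique, non-empty usernames."""
    seen = []
    for line in raw.splitlines():
        name = line.strip()
        if name and name not in seen:
            seen.append(name)
    return seen


payload = {"usernames": parse_usernames("alice\nbob\n\nalice\ncarol"), "admin": False}
print(payload)  # {'usernames': ['alice', 'bob', 'carol'], 'admin': False}
```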
<code>
[start of jupyterhub/apihandlers/users.py]
1 """User handlers"""
2
3 # Copyright (c) Jupyter Development Team.
4 # Distributed under the terms of the Modified BSD License.
5
6 import json
7
8 from tornado import gen, web
9
10 from .. import orm
11 from ..utils import admin_only
12 from .base import APIHandler
13
14
15 class UserListAPIHandler(APIHandler):
16 @admin_only
17 def get(self):
18 users = self.db.query(orm.User)
19 data = [ self.user_model(u) for u in users ]
20 self.write(json.dumps(data))
21
22
23 def admin_or_self(method):
24 """Decorator for restricting access to either the target user or admin"""
25 def m(self, name):
26 current = self.get_current_user()
27 if current is None:
28 raise web.HTTPError(403)
29 if not (current.name == name or current.admin):
30 raise web.HTTPError(403)
31
32 # raise 404 if not found
33 if not self.find_user(name):
34 raise web.HTTPError(404)
35 return method(self, name)
36 return m
37
38 class UserAPIHandler(APIHandler):
39
40 @admin_or_self
41 def get(self, name):
42 user = self.find_user(name)
43 self.write(json.dumps(self.user_model(user)))
44
45 @admin_only
46 @gen.coroutine
47 def post(self, name):
48 data = self.get_json_body()
49 user = self.find_user(name)
50 if user is not None:
51 raise web.HTTPError(400, "User %s already exists" % name)
52
53 user = self.user_from_username(name)
54 if data:
55 self._check_user_model(data)
56 if 'admin' in data:
57 user.admin = data['admin']
58 self.db.commit()
59
60 try:
61 yield gen.maybe_future(self.authenticator.add_user(user))
62 except Exception:
63 self.log.error("Failed to create user: %s" % name, exc_info=True)
64 self.db.delete(user)
65 self.db.commit()
66 raise web.HTTPError(400, "Failed to create user: %s" % name)
67
68 self.write(json.dumps(self.user_model(user)))
69 self.set_status(201)
70
71 @admin_only
72 @gen.coroutine
73 def delete(self, name):
74 user = self.find_user(name)
75 if user is None:
76 raise web.HTTPError(404)
77 if user.name == self.get_current_user().name:
78 raise web.HTTPError(400, "Cannot delete yourself!")
79 if user.stop_pending:
80 raise web.HTTPError(400, "%s's server is in the process of stopping, please wait." % name)
81 if user.running:
82 yield self.stop_single_user(user)
83 if user.stop_pending:
84 raise web.HTTPError(400, "%s's server is in the process of stopping, please wait." % name)
85
86 yield gen.maybe_future(self.authenticator.delete_user(user))
87
88 # remove from the db
89 self.db.delete(user)
90 self.db.commit()
91
92 self.set_status(204)
93
94 @admin_only
95 def patch(self, name):
96 user = self.find_user(name)
97 if user is None:
98 raise web.HTTPError(404)
99 data = self.get_json_body()
100 self._check_user_model(data)
101 for key, value in data.items():
102 setattr(user, key, value)
103 self.db.commit()
104 self.write(json.dumps(self.user_model(user)))
105
106
107 class UserServerAPIHandler(APIHandler):
108 @gen.coroutine
109 @admin_or_self
110 def post(self, name):
111 user = self.find_user(name)
112 if user.spawner:
113 state = yield user.spawner.poll()
114 if state is None:
115 raise web.HTTPError(400, "%s's server is already running" % name)
116
117 yield self.spawn_single_user(user)
118 status = 202 if user.spawn_pending else 201
119 self.set_status(status)
120
121 @gen.coroutine
122 @admin_or_self
123 def delete(self, name):
124 user = self.find_user(name)
125 if user.stop_pending:
126 self.set_status(202)
127 return
128 if not user.running:
129 raise web.HTTPError(400, "%s's server is not running" % name)
130 status = yield user.spawner.poll()
131 if status is not None:
132 raise web.HTTPError(400, "%s's server is not running" % name)
133 yield self.stop_single_user(user)
134 status = 202 if user.stop_pending else 204
135 self.set_status(status)
136
137 class UserAdminAccessAPIHandler(APIHandler):
138 """Grant admins access to single-user servers
139
140 This handler sets the necessary cookie for an admin to login to a single-user server.
141 """
142 @admin_only
143 def post(self, name):
144 current = self.get_current_user()
145 self.log.warn("Admin user %s has requested access to %s's server",
146 current.name, name,
147 )
148 if not self.settings.get('admin_access', False):
149 raise web.HTTPError(403, "admin access to user servers disabled")
150 user = self.find_user(name)
151 if user is None:
152 raise web.HTTPError(404)
153 if not user.running:
154 raise web.HTTPError(400, "%s's server is not running" % name)
155 self.set_server_cookie(user)
156
157
158 default_handlers = [
159 (r"/api/users", UserListAPIHandler),
160 (r"/api/users/([^/]+)", UserAPIHandler),
161 (r"/api/users/([^/]+)/server", UserServerAPIHandler),
162 (r"/api/users/([^/]+)/admin-access", UserAdminAccessAPIHandler),
163 ]
164
[end of jupyterhub/apihandlers/users.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/jupyterhub/apihandlers/users.py b/jupyterhub/apihandlers/users.py
--- a/jupyterhub/apihandlers/users.py
+++ b/jupyterhub/apihandlers/users.py
@@ -18,6 +18,49 @@
users = self.db.query(orm.User)
data = [ self.user_model(u) for u in users ]
self.write(json.dumps(data))
+
+ @admin_only
+ @gen.coroutine
+ def post(self):
+ data = self.get_json_body()
+ if not data or not isinstance(data, dict) or not data.get('usernames'):
+ raise web.HTTPError(400, "Must specify at least one user to create")
+
+ usernames = data.pop('usernames')
+ self._check_user_model(data)
+ # admin is set for all users
+ # to create admin and non-admin users requires at least two API requests
+ admin = data.get('admin', False)
+
+ to_create = []
+ for name in usernames:
+ user = self.find_user(name)
+ if user is not None:
+ self.log.warn("User %s already exists" % name)
+ else:
+ to_create.append(name)
+
+ if not to_create:
+ raise web.HTTPError(400, "All %i users already exist" % len(usernames))
+
+ created = []
+ for name in to_create:
+ user = self.user_from_username(name)
+ if admin:
+ user.admin = True
+ self.db.commit()
+ try:
+ yield gen.maybe_future(self.authenticator.add_user(user))
+ except Exception:
+ self.log.error("Failed to create user: %s" % name, exc_info=True)
+ self.db.delete(user)
+ self.db.commit()
+ raise web.HTTPError(400, "Failed to create user: %s" % name)
+ else:
+ created.append(user)
+
+ self.write(json.dumps([ self.user_model(u) for u in created ]))
+ self.set_status(201)
def admin_or_self(method):
|
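The diff above extends `UserListAPIHandler` with a bulk-create `POST /api/users` handler that takes a `usernames` list and an optional `admin` flag, returning the created user models with a 201 status (or 400 when every requested user already exists). A minimal client-side sketch of exercising such an endpoint with the `requests` library is shown below; the hub URL and the admin API token are placeholders, not values from this repository.

```python
import requests

HUB_API = "http://127.0.0.1:8081/hub/api"  # placeholder hub API base URL
TOKEN = "an-admin-api-token"               # placeholder admin token

# One request creates several users; 'admin' applies to every name in the list.
resp = requests.post(
    HUB_API + "/users",
    headers={"Authorization": "token " + TOKEN},
    json={"usernames": ["alice", "bob", "carol"], "admin": False},
)

if resp.status_code == 201:
    for user in resp.json():  # one user model per newly created account
        print(user["name"], user.get("admin"))
else:
    # 400 is returned when all requested users already exist
    print("bulk create failed:", resp.status_code, resp.text)
```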
{"golden_diff": "diff --git a/jupyterhub/apihandlers/users.py b/jupyterhub/apihandlers/users.py\n--- a/jupyterhub/apihandlers/users.py\n+++ b/jupyterhub/apihandlers/users.py\n@@ -18,6 +18,49 @@\n users = self.db.query(orm.User)\n data = [ self.user_model(u) for u in users ]\n self.write(json.dumps(data))\n+ \n+ @admin_only\n+ @gen.coroutine\n+ def post(self):\n+ data = self.get_json_body()\n+ if not data or not isinstance(data, dict) or not data.get('usernames'):\n+ raise web.HTTPError(400, \"Must specify at least one user to create\")\n+ \n+ usernames = data.pop('usernames')\n+ self._check_user_model(data)\n+ # admin is set for all users\n+ # to create admin and non-admin users requires at least two API requests\n+ admin = data.get('admin', False)\n+ \n+ to_create = []\n+ for name in usernames:\n+ user = self.find_user(name)\n+ if user is not None:\n+ self.log.warn(\"User %s already exists\" % name)\n+ else:\n+ to_create.append(name)\n+ \n+ if not to_create:\n+ raise web.HTTPError(400, \"All %i users already exist\" % len(usernames))\n+ \n+ created = []\n+ for name in to_create:\n+ user = self.user_from_username(name)\n+ if admin:\n+ user.admin = True\n+ self.db.commit()\n+ try:\n+ yield gen.maybe_future(self.authenticator.add_user(user))\n+ except Exception:\n+ self.log.error(\"Failed to create user: %s\" % name, exc_info=True)\n+ self.db.delete(user)\n+ self.db.commit()\n+ raise web.HTTPError(400, \"Failed to create user: %s\" % name)\n+ else:\n+ created.append(user)\n+ \n+ self.write(json.dumps([ self.user_model(u) for u in created ]))\n+ self.set_status(201)\n \n \n def admin_or_self(method):\n", "issue": "Admin panel - Add ability to import multiple users at once\nIt would be nice to add a list of users as a bulk list of newline delimited users.\n\n", "before_files": [{"content": "\"\"\"User handlers\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport json\n\nfrom tornado import gen, web\n\nfrom .. 
import orm\nfrom ..utils import admin_only\nfrom .base import APIHandler\n\n\nclass UserListAPIHandler(APIHandler):\n @admin_only\n def get(self):\n users = self.db.query(orm.User)\n data = [ self.user_model(u) for u in users ]\n self.write(json.dumps(data))\n\n\ndef admin_or_self(method):\n \"\"\"Decorator for restricting access to either the target user or admin\"\"\"\n def m(self, name):\n current = self.get_current_user()\n if current is None:\n raise web.HTTPError(403)\n if not (current.name == name or current.admin):\n raise web.HTTPError(403)\n \n # raise 404 if not found\n if not self.find_user(name):\n raise web.HTTPError(404)\n return method(self, name)\n return m\n\nclass UserAPIHandler(APIHandler):\n \n @admin_or_self\n def get(self, name):\n user = self.find_user(name)\n self.write(json.dumps(self.user_model(user)))\n \n @admin_only\n @gen.coroutine\n def post(self, name):\n data = self.get_json_body()\n user = self.find_user(name)\n if user is not None:\n raise web.HTTPError(400, \"User %s already exists\" % name)\n \n user = self.user_from_username(name)\n if data:\n self._check_user_model(data)\n if 'admin' in data:\n user.admin = data['admin']\n self.db.commit()\n \n try:\n yield gen.maybe_future(self.authenticator.add_user(user))\n except Exception:\n self.log.error(\"Failed to create user: %s\" % name, exc_info=True)\n self.db.delete(user)\n self.db.commit()\n raise web.HTTPError(400, \"Failed to create user: %s\" % name)\n \n self.write(json.dumps(self.user_model(user)))\n self.set_status(201)\n \n @admin_only\n @gen.coroutine\n def delete(self, name):\n user = self.find_user(name)\n if user is None:\n raise web.HTTPError(404)\n if user.name == self.get_current_user().name:\n raise web.HTTPError(400, \"Cannot delete yourself!\")\n if user.stop_pending:\n raise web.HTTPError(400, \"%s's server is in the process of stopping, please wait.\" % name)\n if user.running:\n yield self.stop_single_user(user)\n if user.stop_pending:\n raise web.HTTPError(400, \"%s's server is in the process of stopping, please wait.\" % name)\n \n yield gen.maybe_future(self.authenticator.delete_user(user))\n \n # remove from the db\n self.db.delete(user)\n self.db.commit()\n \n self.set_status(204)\n \n @admin_only\n def patch(self, name):\n user = self.find_user(name)\n if user is None:\n raise web.HTTPError(404)\n data = self.get_json_body()\n self._check_user_model(data)\n for key, value in data.items():\n setattr(user, key, value)\n self.db.commit()\n self.write(json.dumps(self.user_model(user)))\n\n\nclass UserServerAPIHandler(APIHandler):\n @gen.coroutine\n @admin_or_self\n def post(self, name):\n user = self.find_user(name)\n if user.spawner:\n state = yield user.spawner.poll()\n if state is None:\n raise web.HTTPError(400, \"%s's server is already running\" % name)\n\n yield self.spawn_single_user(user)\n status = 202 if user.spawn_pending else 201\n self.set_status(status)\n\n @gen.coroutine\n @admin_or_self\n def delete(self, name):\n user = self.find_user(name)\n if user.stop_pending:\n self.set_status(202)\n return\n if not user.running:\n raise web.HTTPError(400, \"%s's server is not running\" % name)\n status = yield user.spawner.poll()\n if status is not None:\n raise web.HTTPError(400, \"%s's server is not running\" % name)\n yield self.stop_single_user(user)\n status = 202 if user.stop_pending else 204\n self.set_status(status)\n\nclass UserAdminAccessAPIHandler(APIHandler):\n \"\"\"Grant admins access to single-user servers\n \n This handler sets the necessary cookie for an 
admin to login to a single-user server.\n \"\"\"\n @admin_only\n def post(self, name):\n current = self.get_current_user()\n self.log.warn(\"Admin user %s has requested access to %s's server\",\n current.name, name,\n )\n if not self.settings.get('admin_access', False):\n raise web.HTTPError(403, \"admin access to user servers disabled\")\n user = self.find_user(name)\n if user is None:\n raise web.HTTPError(404)\n if not user.running:\n raise web.HTTPError(400, \"%s's server is not running\" % name)\n self.set_server_cookie(user)\n\n\ndefault_handlers = [\n (r\"/api/users\", UserListAPIHandler),\n (r\"/api/users/([^/]+)\", UserAPIHandler),\n (r\"/api/users/([^/]+)/server\", UserServerAPIHandler),\n (r\"/api/users/([^/]+)/admin-access\", UserAdminAccessAPIHandler),\n]\n", "path": "jupyterhub/apihandlers/users.py"}]}
| 2,192 | 475 |
gh_patches_debug_2900
|
rasdani/github-patches
|
git_diff
|
Chia-Network__chia-blockchain-15508
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] Module `chia.wallet.puzzles.clawback` not found
### What happened?
When installing `1.8.2-rc3` or `master` via `pip`, the module `chia.wallet.puzzles.clawback` is missing. The files are not included because the packages are not listed in `setup.py`. This is also true of the `prefarm` sibling package.
### Version
1.8.2-rc3
### What platform are you using?
Linux
### What ui mode are you using?
CLI
### Relevant log output
```shell
$ pip install git+https://github.com/chia-network/chia-blockchain
Collecting git+https://github.com/chia-network/chia-blockchain
Cloning https://github.com/chia-network/chia-blockchain to /tmp/pip-req-build-m26feywu
Running command git clone --filter=blob:none --quiet https://github.com/chia-network/chia-blockchain /tmp/pip-req-build-m26feywu
Resolved https://github.com/chia-network/chia-blockchain to commit 49140b2b3c0c128f2464c0b4e50c496e7029939d
Running command git submodule update --init --recursive -q
[snip]
$ python3
>>> import chia.wallet.wallet
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.11/site-packages/chia/wallet/wallet.py", line 23, in <module>
from chia.wallet.coin_selection import select_coins
File "/usr/local/lib/python3.11/site-packages/chia/wallet/coin_selection.py", line 10, in <module>
from chia.wallet.wallet_coin_record import WalletCoinRecord
File "/usr/local/lib/python3.11/site-packages/chia/wallet/wallet_coin_record.py", line 11, in <module>
from chia.wallet.puzzles.clawback.metadata import ClawbackMetadata, ClawbackVersion
ModuleNotFoundError: No module named 'chia.wallet.puzzles.clawback'
```
</issue>
<code>
[start of setup.py]
1 from __future__ import annotations
2
3 import os
4 import sys
5
6 from setuptools import setup
7
8 dependencies = [
9 "aiofiles==23.1.0", # Async IO for files
10 "anyio==3.6.2",
11 "boto3==1.26.148", # AWS S3 for DL s3 plugin
12 "blspy==1.0.16", # Signature library
13 "chiavdf==1.0.8", # timelord and vdf verification
14 "chiabip158==1.2", # bip158-style wallet filters
15 "chiapos==1.0.11", # proof of space
16 "clvm==0.9.7",
17 "clvm_tools==0.4.6", # Currying, Program.to, other conveniences
18 "chia_rs==0.2.7",
19 "clvm-tools-rs==0.1.34", # Rust implementation of clvm_tools' compiler
20 "aiohttp==3.8.4", # HTTP server for full node rpc
21 "aiosqlite==0.19.0", # asyncio wrapper for sqlite, to store blocks
22 "bitstring==4.0.2", # Binary data management library
23 "colorama==0.4.6", # Colorizes terminal output
24 "colorlog==6.7.0", # Adds color to logs
25 "concurrent-log-handler==0.9.24", # Concurrently log and rotate logs
26 "cryptography==41.0.0", # Python cryptography library for TLS - keyring conflict
27 "filelock==3.12.0", # For reading and writing config multiprocess and multithread safely (non-reentrant locks)
28 "keyring==23.13.1", # Store keys in MacOS Keychain, Windows Credential Locker
29 "PyYAML==6.0", # Used for config file format
30 "setproctitle==1.3.2", # Gives the chia processes readable names
31 "sortedcontainers==2.4.0", # For maintaining sorted mempools
32 "click==8.1.3", # For the CLI
33 "dnspython==2.3.0", # Query DNS seeds
34 "watchdog==2.2.0", # Filesystem event watching - watches keyring.yaml
35 "dnslib==0.9.23", # dns lib
36 "typing-extensions==4.6.0", # typing backports like Protocol and TypedDict
37 "zstd==1.5.5.1",
38 "packaging==23.1",
39 "psutil==5.9.4",
40 ]
41
42 upnp_dependencies = [
43 "miniupnpc==2.2.2", # Allows users to open ports on their router
44 ]
45
46 dev_dependencies = [
47 "build",
48 # >=7.2.4 for https://github.com/nedbat/coveragepy/issues/1604
49 "coverage>=7.2.4",
50 "diff-cover",
51 "pre-commit",
52 "py3createtorrent",
53 "pylint",
54 "pytest",
55 "pytest-asyncio>=0.18.1", # require attribute 'fixture'
56 "pytest-cov",
57 "pytest-monitor; sys_platform == 'linux'",
58 "pytest-xdist",
59 "twine",
60 "isort",
61 "flake8",
62 "mypy",
63 "black==23.3.0",
64 "aiohttp_cors", # For blackd
65 "ipython", # For asyncio debugging
66 "pyinstaller==5.11.0",
67 "types-aiofiles",
68 "types-cryptography",
69 "types-pkg_resources",
70 "types-pyyaml",
71 "types-setuptools",
72 ]
73
74 legacy_keyring_dependencies = [
75 "keyrings.cryptfile==1.3.9",
76 ]
77
78 kwargs = dict(
79 name="chia-blockchain",
80 author="Mariano Sorgente",
81 author_email="[email protected]",
82 description="Chia blockchain full node, farmer, timelord, and wallet.",
83 url="https://chia.net/",
84 license="Apache License",
85 python_requires=">=3.7, <4",
86 keywords="chia blockchain node",
87 install_requires=dependencies,
88 extras_require=dict(
89 dev=dev_dependencies,
90 upnp=upnp_dependencies,
91 legacy_keyring=legacy_keyring_dependencies,
92 ),
93 packages=[
94 "build_scripts",
95 "chia",
96 "chia.cmds",
97 "chia.clvm",
98 "chia.consensus",
99 "chia.daemon",
100 "chia.data_layer",
101 "chia.full_node",
102 "chia.timelord",
103 "chia.farmer",
104 "chia.harvester",
105 "chia.introducer",
106 "chia.plot_sync",
107 "chia.plotters",
108 "chia.plotting",
109 "chia.pools",
110 "chia.protocols",
111 "chia.rpc",
112 "chia.seeder",
113 "chia.server",
114 "chia.simulator",
115 "chia.types.blockchain_format",
116 "chia.types",
117 "chia.util",
118 "chia.wallet",
119 "chia.wallet.db_wallet",
120 "chia.wallet.puzzles",
121 "chia.wallet.cat_wallet",
122 "chia.wallet.did_wallet",
123 "chia.wallet.nft_wallet",
124 "chia.wallet.trading",
125 "chia.wallet.util",
126 "chia.wallet.vc_wallet",
127 "chia.wallet.vc_wallet.vc_puzzles",
128 "chia.wallet.vc_wallet.cr_puzzles",
129 "chia.ssl",
130 "mozilla-ca",
131 ],
132 entry_points={
133 "console_scripts": [
134 "chia = chia.cmds.chia:main",
135 "chia_daemon = chia.daemon.server:main",
136 "chia_wallet = chia.server.start_wallet:main",
137 "chia_full_node = chia.server.start_full_node:main",
138 "chia_harvester = chia.server.start_harvester:main",
139 "chia_farmer = chia.server.start_farmer:main",
140 "chia_introducer = chia.server.start_introducer:main",
141 "chia_crawler = chia.seeder.start_crawler:main",
142 "chia_seeder = chia.seeder.dns_server:main",
143 "chia_timelord = chia.server.start_timelord:main",
144 "chia_timelord_launcher = chia.timelord.timelord_launcher:main",
145 "chia_full_node_simulator = chia.simulator.start_simulator:main",
146 "chia_data_layer = chia.server.start_data_layer:main",
147 "chia_data_layer_http = chia.data_layer.data_layer_server:main",
148 "chia_data_layer_s3_plugin = chia.data_layer.s3_plugin_service:run_server",
149 ]
150 },
151 package_data={
152 "chia": ["pyinstaller.spec"],
153 "": ["*.clsp", "*.clsp.hex", "*.clvm", "*.clib", "py.typed"],
154 "chia.util": ["initial-*.yaml", "english.txt"],
155 "chia.ssl": ["chia_ca.crt", "chia_ca.key", "dst_root_ca.pem"],
156 "mozilla-ca": ["cacert.pem"],
157 },
158 long_description=open("README.md").read(),
159 long_description_content_type="text/markdown",
160 zip_safe=False,
161 project_urls={
162 "Source": "https://github.com/Chia-Network/chia-blockchain/",
163 "Changelog": "https://github.com/Chia-Network/chia-blockchain/blob/main/CHANGELOG.md",
164 },
165 )
166
167 if "setup_file" in sys.modules:
168 # include dev deps in regular deps when run in snyk
169 dependencies.extend(dev_dependencies)
170
171 if len(os.environ.get("CHIA_SKIP_SETUP", "")) < 1:
172 setup(**kwargs) # type: ignore
173
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -118,6 +118,8 @@
"chia.wallet",
"chia.wallet.db_wallet",
"chia.wallet.puzzles",
+ "chia.wallet.puzzles.clawback",
+ "chia.wallet.puzzles.prefarm",
"chia.wallet.cat_wallet",
"chia.wallet.did_wallet",
"chia.wallet.nft_wallet",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -118,6 +118,8 @@\n \"chia.wallet\",\n \"chia.wallet.db_wallet\",\n \"chia.wallet.puzzles\",\n+ \"chia.wallet.puzzles.clawback\",\n+ \"chia.wallet.puzzles.prefarm\",\n \"chia.wallet.cat_wallet\",\n \"chia.wallet.did_wallet\",\n \"chia.wallet.nft_wallet\",\n", "issue": "[Bug] Module `chia.wallet.puzzles.clawback` not found\n### What happened?\r\n\r\nWhen installing `1.8.2-rc3` or `master` via `pip`, the module `chia.wallet.puzzles.clawback` is missing. The files are not included because the packages are not listed in `setup.py`. This is also true of the `prefarm` sibling package.\r\n\r\n### Version\r\n\r\n1.8.2-rc3\r\n\r\n### What platform are you using?\r\n\r\nLinux\r\n\r\n### What ui mode are you using?\r\n\r\nCLI\r\n\r\n### Relevant log output\r\n\r\n```shell\r\n$ pip install git+https://github.com/chia-network/chia-blockchain\r\nCollecting git+https://github.com/chia-network/chia-blockchain\r\n Cloning https://github.com/chia-network/chia-blockchain to /tmp/pip-req-build-m26feywu\r\n Running command git clone --filter=blob:none --quiet https://github.com/chia-network/chia-blockchain /tmp/pip-req-build-m26feywu\r\n Resolved https://github.com/chia-network/chia-blockchain to commit 49140b2b3c0c128f2464c0b4e50c496e7029939d\r\n Running command git submodule update --init --recursive -q\r\n[snip]\r\n$ python3\r\n>>> import chia.wallet.wallet\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/usr/local/lib/python3.11/site-packages/chia/wallet/wallet.py\", line 23, in <module>\r\n from chia.wallet.coin_selection import select_coins\r\n File \"/usr/local/lib/python3.11/site-packages/chia/wallet/coin_selection.py\", line 10, in <module>\r\n from chia.wallet.wallet_coin_record import WalletCoinRecord\r\n File \"/usr/local/lib/python3.11/site-packages/chia/wallet/wallet_coin_record.py\", line 11, in <module>\r\n from chia.wallet.puzzles.clawback.metadata import ClawbackMetadata, ClawbackVersion\r\nModuleNotFoundError: No module named 'chia.wallet.puzzles.clawback'\r\n```\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport os\nimport sys\n\nfrom setuptools import setup\n\ndependencies = [\n \"aiofiles==23.1.0\", # Async IO for files\n \"anyio==3.6.2\",\n \"boto3==1.26.148\", # AWS S3 for DL s3 plugin\n \"blspy==1.0.16\", # Signature library\n \"chiavdf==1.0.8\", # timelord and vdf verification\n \"chiabip158==1.2\", # bip158-style wallet filters\n \"chiapos==1.0.11\", # proof of space\n \"clvm==0.9.7\",\n \"clvm_tools==0.4.6\", # Currying, Program.to, other conveniences\n \"chia_rs==0.2.7\",\n \"clvm-tools-rs==0.1.34\", # Rust implementation of clvm_tools' compiler\n \"aiohttp==3.8.4\", # HTTP server for full node rpc\n \"aiosqlite==0.19.0\", # asyncio wrapper for sqlite, to store blocks\n \"bitstring==4.0.2\", # Binary data management library\n \"colorama==0.4.6\", # Colorizes terminal output\n \"colorlog==6.7.0\", # Adds color to logs\n \"concurrent-log-handler==0.9.24\", # Concurrently log and rotate logs\n \"cryptography==41.0.0\", # Python cryptography library for TLS - keyring conflict\n \"filelock==3.12.0\", # For reading and writing config multiprocess and multithread safely (non-reentrant locks)\n \"keyring==23.13.1\", # Store keys in MacOS Keychain, Windows Credential Locker\n \"PyYAML==6.0\", # Used for config file format\n \"setproctitle==1.3.2\", # Gives the chia processes readable names\n \"sortedcontainers==2.4.0\", # For 
maintaining sorted mempools\n \"click==8.1.3\", # For the CLI\n \"dnspython==2.3.0\", # Query DNS seeds\n \"watchdog==2.2.0\", # Filesystem event watching - watches keyring.yaml\n \"dnslib==0.9.23\", # dns lib\n \"typing-extensions==4.6.0\", # typing backports like Protocol and TypedDict\n \"zstd==1.5.5.1\",\n \"packaging==23.1\",\n \"psutil==5.9.4\",\n]\n\nupnp_dependencies = [\n \"miniupnpc==2.2.2\", # Allows users to open ports on their router\n]\n\ndev_dependencies = [\n \"build\",\n # >=7.2.4 for https://github.com/nedbat/coveragepy/issues/1604\n \"coverage>=7.2.4\",\n \"diff-cover\",\n \"pre-commit\",\n \"py3createtorrent\",\n \"pylint\",\n \"pytest\",\n \"pytest-asyncio>=0.18.1\", # require attribute 'fixture'\n \"pytest-cov\",\n \"pytest-monitor; sys_platform == 'linux'\",\n \"pytest-xdist\",\n \"twine\",\n \"isort\",\n \"flake8\",\n \"mypy\",\n \"black==23.3.0\",\n \"aiohttp_cors\", # For blackd\n \"ipython\", # For asyncio debugging\n \"pyinstaller==5.11.0\",\n \"types-aiofiles\",\n \"types-cryptography\",\n \"types-pkg_resources\",\n \"types-pyyaml\",\n \"types-setuptools\",\n]\n\nlegacy_keyring_dependencies = [\n \"keyrings.cryptfile==1.3.9\",\n]\n\nkwargs = dict(\n name=\"chia-blockchain\",\n author=\"Mariano Sorgente\",\n author_email=\"[email protected]\",\n description=\"Chia blockchain full node, farmer, timelord, and wallet.\",\n url=\"https://chia.net/\",\n license=\"Apache License\",\n python_requires=\">=3.7, <4\",\n keywords=\"chia blockchain node\",\n install_requires=dependencies,\n extras_require=dict(\n dev=dev_dependencies,\n upnp=upnp_dependencies,\n legacy_keyring=legacy_keyring_dependencies,\n ),\n packages=[\n \"build_scripts\",\n \"chia\",\n \"chia.cmds\",\n \"chia.clvm\",\n \"chia.consensus\",\n \"chia.daemon\",\n \"chia.data_layer\",\n \"chia.full_node\",\n \"chia.timelord\",\n \"chia.farmer\",\n \"chia.harvester\",\n \"chia.introducer\",\n \"chia.plot_sync\",\n \"chia.plotters\",\n \"chia.plotting\",\n \"chia.pools\",\n \"chia.protocols\",\n \"chia.rpc\",\n \"chia.seeder\",\n \"chia.server\",\n \"chia.simulator\",\n \"chia.types.blockchain_format\",\n \"chia.types\",\n \"chia.util\",\n \"chia.wallet\",\n \"chia.wallet.db_wallet\",\n \"chia.wallet.puzzles\",\n \"chia.wallet.cat_wallet\",\n \"chia.wallet.did_wallet\",\n \"chia.wallet.nft_wallet\",\n \"chia.wallet.trading\",\n \"chia.wallet.util\",\n \"chia.wallet.vc_wallet\",\n \"chia.wallet.vc_wallet.vc_puzzles\",\n \"chia.wallet.vc_wallet.cr_puzzles\",\n \"chia.ssl\",\n \"mozilla-ca\",\n ],\n entry_points={\n \"console_scripts\": [\n \"chia = chia.cmds.chia:main\",\n \"chia_daemon = chia.daemon.server:main\",\n \"chia_wallet = chia.server.start_wallet:main\",\n \"chia_full_node = chia.server.start_full_node:main\",\n \"chia_harvester = chia.server.start_harvester:main\",\n \"chia_farmer = chia.server.start_farmer:main\",\n \"chia_introducer = chia.server.start_introducer:main\",\n \"chia_crawler = chia.seeder.start_crawler:main\",\n \"chia_seeder = chia.seeder.dns_server:main\",\n \"chia_timelord = chia.server.start_timelord:main\",\n \"chia_timelord_launcher = chia.timelord.timelord_launcher:main\",\n \"chia_full_node_simulator = chia.simulator.start_simulator:main\",\n \"chia_data_layer = chia.server.start_data_layer:main\",\n \"chia_data_layer_http = chia.data_layer.data_layer_server:main\",\n \"chia_data_layer_s3_plugin = chia.data_layer.s3_plugin_service:run_server\",\n ]\n },\n package_data={\n \"chia\": [\"pyinstaller.spec\"],\n \"\": [\"*.clsp\", \"*.clsp.hex\", \"*.clvm\", \"*.clib\", 
\"py.typed\"],\n \"chia.util\": [\"initial-*.yaml\", \"english.txt\"],\n \"chia.ssl\": [\"chia_ca.crt\", \"chia_ca.key\", \"dst_root_ca.pem\"],\n \"mozilla-ca\": [\"cacert.pem\"],\n },\n long_description=open(\"README.md\").read(),\n long_description_content_type=\"text/markdown\",\n zip_safe=False,\n project_urls={\n \"Source\": \"https://github.com/Chia-Network/chia-blockchain/\",\n \"Changelog\": \"https://github.com/Chia-Network/chia-blockchain/blob/main/CHANGELOG.md\",\n },\n)\n\nif \"setup_file\" in sys.modules:\n # include dev deps in regular deps when run in snyk\n dependencies.extend(dev_dependencies)\n\nif len(os.environ.get(\"CHIA_SKIP_SETUP\", \"\")) < 1:\n setup(**kwargs) # type: ignore\n", "path": "setup.py"}]}
| 3,182 | 99 |
gh_patches_debug_26616
|
rasdani/github-patches
|
git_diff
|
kivy__kivy-3859
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Button behavior has a property MIN_STATE_TIME
This property can't be used in kv because of the uppercase name.
</issue>
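For context, the kv parser essentially distinguishes "set a property" from "declare a child widget or class rule" by whether the name before the colon starts with an uppercase letter, which is why an all-uppercase attribute like `MIN_STATE_TIME` cannot be assigned from kv. The sketch below is hypothetical: the `QuickButton` rule is made up, and the lowercase name assumes the rename introduced by the patch further down in this record.

```python
from kivy.lang import Builder

# With a lowercase property name the assignment parses as a normal kv rule.
Builder.load_string("""
<QuickButton@Button>:
    min_state_time: 0.1
    text: 'press me'
""")

# The old spelling would be read by the kv parser as a class-like name,
# not a property assignment, which is what this issue is about:
#     MIN_STATE_TIME: 0.1
```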
<code>
[start of kivy/uix/behaviors/button.py]
1 '''See :class:`ButtonBehavior` for details.
2 '''
3
4 __all__ = ('ButtonBehavior', )
5
6 from kivy.clock import Clock
7 from kivy.properties import OptionProperty, ObjectProperty, BooleanProperty
8 from time import time
9
10
11 class ButtonBehavior(object):
12 '''
13 This `mixin <https://en.wikipedia.org/wiki/Mixin>`_ class provides
14 :class:`~kivy.uix.button.Button` behavior.
15
16 :Events:
17 `on_press`
18 Fired when the button is pressed.
19 `on_release`
20 Fired when the button is released (i.e. the touch/click that
21 pressed the button goes away).
22 '''
23
24 state = OptionProperty('normal', options=('normal', 'down'))
25 '''The state of the button, must be one of 'normal' or 'down'.
26 The state is 'down' only when the button is currently touched/clicked,
27 otherwise its 'normal'.
28
29 :attr:`state` is an :class:`~kivy.properties.OptionProperty` and defaults
30 to 'normal'.
31 '''
32
33 last_touch = ObjectProperty(None)
34 '''Contains the last relevant touch received by the Button. This can
35 be used in `on_press` or `on_release` in order to know which touch
36 dispatched the event.
37
38 .. versionadded:: 1.8.0
39
40 :attr:`last_touch` is a :class:`~kivy.properties.ObjectProperty` and
41 defaults to `None`.
42 '''
43
44 MIN_STATE_TIME = 0.035
45 '''The minimum period of time which the widget must remain in the
46 `'down'` state.
47
48 :attr:`MIN_STATE_TIME` is a float and defaults to 0.035.
49 '''
50
51 always_release = BooleanProperty(True)
52 '''This determines whether or not the widget fires an `on_release` event if
53 the touch_up is outside the widget.
54
55 .. versionadded:: 1.9.0
56
57 :attr:`always_release` is a :class:`~kivy.properties.BooleanProperty` and
58 defaults to `True`.
59 '''
60
61 def __init__(self, **kwargs):
62 self.register_event_type('on_press')
63 self.register_event_type('on_release')
64 super(ButtonBehavior, self).__init__(**kwargs)
65 self.__state_event = None
66 self.__touch_time = None
67 self.fbind('state', self.cancel_event)
68
69 def _do_press(self):
70 self.state = 'down'
71
72 def _do_release(self, *args):
73 self.state = 'normal'
74
75 def cancel_event(self, *args):
76 if self.__state_event:
77 self.__state_event.cancel()
78 self.__state_event = None
79
80 def on_touch_down(self, touch):
81 if super(ButtonBehavior, self).on_touch_down(touch):
82 return True
83 if touch.is_mouse_scrolling:
84 return False
85 if not self.collide_point(touch.x, touch.y):
86 return False
87 if self in touch.ud:
88 return False
89 touch.grab(self)
90 touch.ud[self] = True
91 self.last_touch = touch
92 self.__touch_time = time()
93 self._do_press()
94 self.dispatch('on_press')
95 return True
96
97 def on_touch_move(self, touch):
98 if touch.grab_current is self:
99 return True
100 if super(ButtonBehavior, self).on_touch_move(touch):
101 return True
102 return self in touch.ud
103
104 def on_touch_up(self, touch):
105 if touch.grab_current is not self:
106 return super(ButtonBehavior, self).on_touch_up(touch)
107 assert(self in touch.ud)
108 touch.ungrab(self)
109 self.last_touch = touch
110
111 if (not self.always_release
112 and not self.collide_point(*touch.pos)):
113 self.state = 'normal'
114 return
115
116 touchtime = time() - self.__touch_time
117 if touchtime < self.MIN_STATE_TIME:
118 self.__state_event = Clock.schedule_once(
119 self._do_release, self.MIN_STATE_TIME - touchtime)
120 else:
121 self._do_release()
122 self.dispatch('on_release')
123 return True
124
125 def on_press(self):
126 pass
127
128 def on_release(self):
129 pass
130
131 def trigger_action(self, duration=0.1):
132 '''Trigger whatever action(s) have been bound to the button by calling
133 both the on_press and on_release callbacks.
134
135 This simulates a quick button press without using any touch events.
136
137 Duration is the length of the press in seconds. Pass 0 if you want
138 the action to happen instantly.
139
140 .. versionadded:: 1.8.0
141 '''
142 self._do_press()
143 self.dispatch('on_press')
144
145 def trigger_release(dt):
146 self._do_release()
147 self.dispatch('on_release')
148 if not duration:
149 trigger_release(0)
150 else:
151 Clock.schedule_once(trigger_release, duration)
152
[end of kivy/uix/behaviors/button.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kivy/uix/behaviors/button.py b/kivy/uix/behaviors/button.py
--- a/kivy/uix/behaviors/button.py
+++ b/kivy/uix/behaviors/button.py
@@ -4,8 +4,10 @@
__all__ = ('ButtonBehavior', )
from kivy.clock import Clock
-from kivy.properties import OptionProperty, ObjectProperty, BooleanProperty
+from kivy.properties import OptionProperty, ObjectProperty, \
+ BooleanProperty, NumericProperty, AliasProperty
from time import time
+from kivy.logger import Logger
class ButtonBehavior(object):
@@ -41,13 +43,26 @@
defaults to `None`.
'''
- MIN_STATE_TIME = 0.035
+ min_state_time = NumericProperty(0.035)
'''The minimum period of time which the widget must remain in the
`'down'` state.
- :attr:`MIN_STATE_TIME` is a float and defaults to 0.035.
+ :attr:`min_state_time` is a float and defaults to 0.035.
'''
+ def _set_min_state_time(self, value):
+ Logger.warning(
+ 'MIN_STATE_TIME is deprecated, use min_state_time instead')
+ self.min_state_time = value
+
+ def _get_min_state_time(self):
+ Logger.warning(
+ 'MIN_STATE_TIME is deprecated, use min_state_time instead')
+ return self.min_state_time
+
+ MIN_STATE_TIME = AliasProperty(
+ _get_min_state_time, _set_min_state_time, bind=('min_state_time', ))
+
always_release = BooleanProperty(True)
'''This determines whether or not the widget fires an `on_release` event if
the touch_up is outside the widget.
|
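After this change `min_state_time` is an ordinary `NumericProperty`, and the old uppercase name is kept as an `AliasProperty` shim that logs a deprecation warning on access. A short sketch of how both spellings would behave on a patched widget (a plain `Button` instance is assumed):

```python
from kivy.uix.button import Button

btn = Button(text="ok")

# New spelling: a regular Kivy property, so it can also be set from kv now.
btn.min_state_time = 0.05

# Old spelling: still accepted through the AliasProperty, but every get/set
# logs "MIN_STATE_TIME is deprecated, use min_state_time instead".
btn.MIN_STATE_TIME = 0.07
print(btn.min_state_time)  # 0.07
```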
{"golden_diff": "diff --git a/kivy/uix/behaviors/button.py b/kivy/uix/behaviors/button.py\n--- a/kivy/uix/behaviors/button.py\n+++ b/kivy/uix/behaviors/button.py\n@@ -4,8 +4,10 @@\n __all__ = ('ButtonBehavior', )\n \n from kivy.clock import Clock\n-from kivy.properties import OptionProperty, ObjectProperty, BooleanProperty\n+from kivy.properties import OptionProperty, ObjectProperty, \\\n+ BooleanProperty, NumericProperty, AliasProperty\n from time import time\n+from kivy.logger import Logger\n \n \n class ButtonBehavior(object):\n@@ -41,13 +43,26 @@\n defaults to `None`.\n '''\n \n- MIN_STATE_TIME = 0.035\n+ min_state_time = NumericProperty(0.035)\n '''The minimum period of time which the widget must remain in the\n `'down'` state.\n \n- :attr:`MIN_STATE_TIME` is a float and defaults to 0.035.\n+ :attr:`min_state_time` is a float and defaults to 0.035.\n '''\n \n+ def _set_min_state_time(self, value):\n+ Logger.warning(\n+ 'MIN_STATE_TIME is deprecated, use min_state_time instead')\n+ self.min_state_time = value\n+\n+ def _get_min_state_time(self):\n+ Logger.warning(\n+ 'MIN_STATE_TIME is deprecated, use min_state_time instead')\n+ return self.min_state_time\n+\n+ MIN_STATE_TIME = AliasProperty(\n+ _get_min_state_time, _set_min_state_time, bind=('min_state_time', ))\n+\n always_release = BooleanProperty(True)\n '''This determines whether or not the widget fires an `on_release` event if\n the touch_up is outside the widget.\n", "issue": "Button behavior has a property MIN_STATE_TIME\nThis property can't be used in kv cause of the uppercase.\n\n", "before_files": [{"content": "'''See :class:`ButtonBehavior` for details.\n'''\n\n__all__ = ('ButtonBehavior', )\n\nfrom kivy.clock import Clock\nfrom kivy.properties import OptionProperty, ObjectProperty, BooleanProperty\nfrom time import time\n\n\nclass ButtonBehavior(object):\n '''\n This `mixin <https://en.wikipedia.org/wiki/Mixin>`_ class provides\n :class:`~kivy.uix.button.Button` behavior.\n\n :Events:\n `on_press`\n Fired when the button is pressed.\n `on_release`\n Fired when the button is released (i.e. the touch/click that\n pressed the button goes away).\n '''\n\n state = OptionProperty('normal', options=('normal', 'down'))\n '''The state of the button, must be one of 'normal' or 'down'.\n The state is 'down' only when the button is currently touched/clicked,\n otherwise its 'normal'.\n\n :attr:`state` is an :class:`~kivy.properties.OptionProperty` and defaults\n to 'normal'.\n '''\n\n last_touch = ObjectProperty(None)\n '''Contains the last relevant touch received by the Button. This can\n be used in `on_press` or `on_release` in order to know which touch\n dispatched the event.\n\n .. versionadded:: 1.8.0\n\n :attr:`last_touch` is a :class:`~kivy.properties.ObjectProperty` and\n defaults to `None`.\n '''\n\n MIN_STATE_TIME = 0.035\n '''The minimum period of time which the widget must remain in the\n `'down'` state.\n\n :attr:`MIN_STATE_TIME` is a float and defaults to 0.035.\n '''\n\n always_release = BooleanProperty(True)\n '''This determines whether or not the widget fires an `on_release` event if\n the touch_up is outside the widget.\n\n .. 
versionadded:: 1.9.0\n\n :attr:`always_release` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to `True`.\n '''\n\n def __init__(self, **kwargs):\n self.register_event_type('on_press')\n self.register_event_type('on_release')\n super(ButtonBehavior, self).__init__(**kwargs)\n self.__state_event = None\n self.__touch_time = None\n self.fbind('state', self.cancel_event)\n\n def _do_press(self):\n self.state = 'down'\n\n def _do_release(self, *args):\n self.state = 'normal'\n\n def cancel_event(self, *args):\n if self.__state_event:\n self.__state_event.cancel()\n self.__state_event = None\n\n def on_touch_down(self, touch):\n if super(ButtonBehavior, self).on_touch_down(touch):\n return True\n if touch.is_mouse_scrolling:\n return False\n if not self.collide_point(touch.x, touch.y):\n return False\n if self in touch.ud:\n return False\n touch.grab(self)\n touch.ud[self] = True\n self.last_touch = touch\n self.__touch_time = time()\n self._do_press()\n self.dispatch('on_press')\n return True\n\n def on_touch_move(self, touch):\n if touch.grab_current is self:\n return True\n if super(ButtonBehavior, self).on_touch_move(touch):\n return True\n return self in touch.ud\n\n def on_touch_up(self, touch):\n if touch.grab_current is not self:\n return super(ButtonBehavior, self).on_touch_up(touch)\n assert(self in touch.ud)\n touch.ungrab(self)\n self.last_touch = touch\n\n if (not self.always_release\n and not self.collide_point(*touch.pos)):\n self.state = 'normal'\n return\n\n touchtime = time() - self.__touch_time\n if touchtime < self.MIN_STATE_TIME:\n self.__state_event = Clock.schedule_once(\n self._do_release, self.MIN_STATE_TIME - touchtime)\n else:\n self._do_release()\n self.dispatch('on_release')\n return True\n\n def on_press(self):\n pass\n\n def on_release(self):\n pass\n\n def trigger_action(self, duration=0.1):\n '''Trigger whatever action(s) have been bound to the button by calling\n both the on_press and on_release callbacks.\n\n This simulates a quick button press without using any touch events.\n\n Duration is the length of the press in seconds. Pass 0 if you want\n the action to happen instantly.\n\n .. versionadded:: 1.8.0\n '''\n self._do_press()\n self.dispatch('on_press')\n\n def trigger_release(dt):\n self._do_release()\n self.dispatch('on_release')\n if not duration:\n trigger_release(0)\n else:\n Clock.schedule_once(trigger_release, duration)\n", "path": "kivy/uix/behaviors/button.py"}]}
| 1,982 | 401 |
gh_patches_debug_33958
|
rasdani/github-patches
|
git_diff
|
googleapis__google-auth-library-python-97
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add GAE Signer
</issue>
<code>
[start of google/auth/app_engine.py]
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Google App Engine standard environment credentials.
16
17 This module provides authentication for application running on App Engine in
18 the standard environment using the `App Identity API`_.
19
20
21 .. _App Identity API:
22 https://cloud.google.com/appengine/docs/python/appidentity/
23 """
24
25 import datetime
26
27 from google.auth import _helpers
28 from google.auth import credentials
29
30 try:
31 from google.appengine.api import app_identity
32 except ImportError:
33 app_identity = None
34
35
36 def get_project_id():
37 """Gets the project ID for the current App Engine application.
38
39 Returns:
40 str: The project ID
41
42 Raises:
43 EnvironmentError: If the App Engine APIs are unavailable.
44 """
45 if app_identity is None:
46 raise EnvironmentError(
47 'The App Engine APIs are not available.')
48 return app_identity.get_application_id()
49
50
51 class Credentials(credentials.Scoped, credentials.Signing,
52 credentials.Credentials):
53 """App Engine standard environment credentials.
54
55 These credentials use the App Engine App Identity API to obtain access
56 tokens.
57 """
58
59 def __init__(self, scopes=None, service_account_id=None):
60 """
61 Args:
62 scopes (Sequence[str]): Scopes to request from the App Identity
63 API.
64 service_account_id (str): The service account ID passed into
65 :func:`google.appengine.api.app_identity.get_access_token`.
66 If not specified, the default application service account
67 ID will be used.
68
69 Raises:
70 EnvironmentError: If the App Engine APIs are unavailable.
71 """
72 if app_identity is None:
73 raise EnvironmentError(
74 'The App Engine APIs are not available.')
75
76 super(Credentials, self).__init__()
77 self._scopes = scopes
78 self._service_account_id = service_account_id
79
80 @_helpers.copy_docstring(credentials.Credentials)
81 def refresh(self, request):
82 # pylint: disable=unused-argument
83 token, ttl = app_identity.get_access_token(
84 self._scopes, self._service_account_id)
85 expiry = _helpers.utcnow() + datetime.timedelta(seconds=ttl)
86
87 self.token, self.expiry = token, expiry
88
89 @property
90 def service_account_email(self):
91 """The service account email."""
92 if self._service_account_id is None:
93 self._service_account_id = app_identity.get_service_account_name()
94 return self._service_account_id
95
96 @property
97 def requires_scopes(self):
98 """Checks if the credentials requires scopes.
99
100 Returns:
101 bool: True if there are no scopes set otherwise False.
102 """
103 return not self._scopes
104
105 @_helpers.copy_docstring(credentials.Scoped)
106 def with_scopes(self, scopes):
107 return Credentials(
108 scopes=scopes, service_account_id=self._service_account_id)
109
110 @_helpers.copy_docstring(credentials.Signing)
111 def sign_bytes(self, message):
112 return app_identity.sign_blob(message)
113
114 @property
115 @_helpers.copy_docstring(credentials.Signing)
116 def signer_email(self):
117 return self.service_account_email
118
[end of google/auth/app_engine.py]
[start of google/auth/crypt.py]
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Cryptography helpers for verifying and signing messages.
16
17 Uses the ``rsa``, ``pyasn1`` and ``pyasn1_modules`` packages
18 to parse PEM files storing PKCS#1 or PKCS#8 keys as well as
19 certificates. There is no support for p12 files.
20
21 The simplest way to verify signatures is using :func:`verify_signature`::
22
23 cert = open('certs.pem').read()
24 valid = crypt.verify_signature(message, signature, cert)
25
26 If you're going to verify many messages with the same certificate, you can use
27 :class:`Verifier`::
28
29 cert = open('certs.pem').read()
30 verifier = crypt.Verifier.from_string(cert)
31 valid = verifier.verify(message, signature)
32
33
34 To sign messages use :class:`Signer` with a private key::
35
36 private_key = open('private_key.pem').read()
37 signer = crypt.Signer(private_key)
38 signature = signer.sign(message)
39
40 """
41
42 from pyasn1.codec.der import decoder
43 from pyasn1_modules import pem
44 from pyasn1_modules.rfc2459 import Certificate
45 from pyasn1_modules.rfc5208 import PrivateKeyInfo
46 import rsa
47 import six
48
49 from google.auth import _helpers
50
51 _POW2 = (128, 64, 32, 16, 8, 4, 2, 1)
52 _CERTIFICATE_MARKER = b'-----BEGIN CERTIFICATE-----'
53 _PKCS1_MARKER = ('-----BEGIN RSA PRIVATE KEY-----',
54 '-----END RSA PRIVATE KEY-----')
55 _PKCS8_MARKER = ('-----BEGIN PRIVATE KEY-----',
56 '-----END PRIVATE KEY-----')
57 _PKCS8_SPEC = PrivateKeyInfo()
58
59
60 def _bit_list_to_bytes(bit_list):
61 """Converts an iterable of 1s and 0s to bytes.
62
63 Combines the list 8 at a time, treating each group of 8 bits
64 as a single byte.
65
66 Args:
67 bit_list (Sequence): Sequence of 1s and 0s.
68
69 Returns:
70 bytes: The decoded bytes.
71 """
72 num_bits = len(bit_list)
73 byte_vals = bytearray()
74 for start in six.moves.xrange(0, num_bits, 8):
75 curr_bits = bit_list[start:start + 8]
76 char_val = sum(val * digit
77 for val, digit in six.moves.zip(_POW2, curr_bits))
78 byte_vals.append(char_val)
79 return bytes(byte_vals)
80
81
82 class Verifier(object):
83 """This object is used to verify cryptographic signatures.
84
85 Args:
86 public_key (rsa.key.PublicKey): The public key used to verify
87 signatures.
88 """
89
90 def __init__(self, public_key):
91 self._pubkey = public_key
92
93 def verify(self, message, signature):
94 """Verifies a message against a cryptographic signature.
95
96 Args:
97 message (Union[str, bytes]): The message to verify.
98 signature (Union[str, bytes]): The cryptography signature to check.
99
100 Returns:
101 bool: True if message was signed by the private key associated
102 with the public key that this object was constructed with.
103 """
104 message = _helpers.to_bytes(message)
105 try:
106 return rsa.pkcs1.verify(message, signature, self._pubkey)
107 except (ValueError, rsa.pkcs1.VerificationError):
108 return False
109
110 @classmethod
111 def from_string(cls, public_key):
112 """Construct an Verifier instance from a public key or public
113 certificate string.
114
115 Args:
116 public_key (Union[str, bytes]): The public key in PEM format or the
117 x509 public key certificate.
118
119 Returns:
120 Verifier: The constructed verifier.
121
122 Raises:
123 ValueError: If the public_key can't be parsed.
124 """
125 public_key = _helpers.to_bytes(public_key)
126 is_x509_cert = _CERTIFICATE_MARKER in public_key
127
128 # If this is a certificate, extract the public key info.
129 if is_x509_cert:
130 der = rsa.pem.load_pem(public_key, 'CERTIFICATE')
131 asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate())
132 if remaining != b'':
133 raise ValueError('Unused bytes', remaining)
134
135 cert_info = asn1_cert['tbsCertificate']['subjectPublicKeyInfo']
136 key_bytes = _bit_list_to_bytes(cert_info['subjectPublicKey'])
137 pubkey = rsa.PublicKey.load_pkcs1(key_bytes, 'DER')
138 else:
139 pubkey = rsa.PublicKey.load_pkcs1(public_key, 'PEM')
140 return cls(pubkey)
141
142
143 def verify_signature(message, signature, certs):
144 """Verify a cryptographic signature.
145
146 Checks that the provided ``signature`` was generated from ``bytes`` using
147 the private key associated with the ``cert``.
148
149 Args:
150 message (Union[str, bytes]): The plaintext message.
151 signature (Union[str, bytes]): The cryptographic signature to check.
152 certs (Union[Sequence, str, bytes]): The certificate or certificates
153 to use to check the signature.
154
155 Returns:
156 bool: True if the signature is valid, otherwise False.
157 """
158 if isinstance(certs, (six.text_type, six.binary_type)):
159 certs = [certs]
160
161 for cert in certs:
162 verifier = Verifier.from_string(cert)
163 if verifier.verify(message, signature):
164 return True
165 return False
166
167
168 class Signer(object):
169 """Signs messages with a private key.
170
171 Args:
172 private_key (rsa.key.PrivateKey): The private key to sign with.
173 key_id (str): Optional key ID used to identify this private key. This
174 can be useful to associate the private key with its associated
175 public key or certificate.
176 """
177
178 def __init__(self, private_key, key_id=None):
179 self._key = private_key
180 self.key_id = key_id
181
182 def sign(self, message):
183 """Signs a message.
184
185 Args:
186 message (Union[str, bytes]): The message to be signed.
187
188 Returns:
189 bytes: The signature of the message for the given key.
190 """
191 message = _helpers.to_bytes(message)
192 return rsa.pkcs1.sign(message, self._key, 'SHA-256')
193
194 @classmethod
195 def from_string(cls, key, key_id=None):
196 """Construct an Signer instance from a private key in PEM format.
197
198 Args:
199 key (str): Private key in PEM format.
200 key_id (str): An optional key id used to identify the private key.
201
202 Returns:
203 Signer: The constructed signer.
204
205 Raises:
206 ValueError: If the key cannot be parsed as PKCS#1 or PKCS#8 in
207 PEM format.
208 """
209 key = _helpers.from_bytes(key) # PEM expects str in Python 3
210 marker_id, key_bytes = pem.readPemBlocksFromFile(
211 six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)
212
213 # Key is in pkcs1 format.
214 if marker_id == 0:
215 private_key = rsa.key.PrivateKey.load_pkcs1(
216 key_bytes, format='DER')
217 # Key is in pkcs8.
218 elif marker_id == 1:
219 key_info, remaining = decoder.decode(
220 key_bytes, asn1Spec=_PKCS8_SPEC)
221 if remaining != b'':
222 raise ValueError('Unused bytes', remaining)
223 private_key_info = key_info.getComponentByName('privateKey')
224 private_key = rsa.key.PrivateKey.load_pkcs1(
225 private_key_info.asOctets(), format='DER')
226 else:
227 raise ValueError('No key could be detected.')
228
229 return cls(private_key, key_id=key_id)
230
[end of google/auth/crypt.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/google/auth/app_engine.py b/google/auth/app_engine.py
--- a/google/auth/app_engine.py
+++ b/google/auth/app_engine.py
@@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Google App Engine standard environment credentials.
+"""Google App Engine standard environment support.
-This module provides authentication for application running on App Engine in
-the standard environment using the `App Identity API`_.
+This module provides authentication and signing for applications running on App
+Engine in the standard environment using the `App Identity API`_.
.. _App Identity API:
@@ -33,6 +33,29 @@
app_identity = None
+class Signer(object):
+ """Signs messages using the App Engine app identity service.
+
+ This can be used in place of :class:`google.auth.crypt.Signer` when
+ running in the App Engine standard environment.
+ """
+ def __init__(self):
+ self.key_id = None
+
+ @staticmethod
+ def sign(message):
+ """Signs a message.
+
+ Args:
+ message (Union[str, bytes]): The message to be signed.
+
+ Returns:
+ bytes: The signature of the message.
+ """
+ message = _helpers.to_bytes(message)
+ return app_identity.sign_blob(message)
+
+
def get_project_id():
"""Gets the project ID for the current App Engine application.
@@ -109,7 +132,7 @@
@_helpers.copy_docstring(credentials.Signing)
def sign_bytes(self, message):
- return app_identity.sign_blob(message)
+ return Signer().sign(message)
@property
@_helpers.copy_docstring(credentials.Signing)
diff --git a/google/auth/crypt.py b/google/auth/crypt.py
--- a/google/auth/crypt.py
+++ b/google/auth/crypt.py
@@ -186,7 +186,7 @@
message (Union[str, bytes]): The message to be signed.
Returns:
- bytes: The signature of the message for the given key.
+ bytes: The signature of the message.
"""
message = _helpers.to_bytes(message)
return rsa.pkcs1.sign(message, self._key, 'SHA-256')
|
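The added `app_engine.Signer` mirrors the interface of `crypt.Signer` (a `key_id` attribute plus a `sign()` method) on top of the App Identity service, and `Credentials.sign_bytes()` now routes through it. A sketch of using it directly follows; this only runs inside the App Engine standard environment, where `google.appengine.api.app_identity` is importable, and the scope shown is just an example.

```python
from google.auth import app_engine

# No private key file is needed: signing is delegated to the App Identity API.
signer = app_engine.Signer()
signature = signer.sign(b"a message to sign")

# The credentials object uses the same signing path under the hood.
credentials = app_engine.Credentials(
    scopes=["https://www.googleapis.com/auth/userinfo.email"])
also_signed = credentials.sign_bytes(b"a message to sign")
```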
{"golden_diff": "diff --git a/google/auth/app_engine.py b/google/auth/app_engine.py\n--- a/google/auth/app_engine.py\n+++ b/google/auth/app_engine.py\n@@ -12,10 +12,10 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-\"\"\"Google App Engine standard environment credentials.\n+\"\"\"Google App Engine standard environment support.\n \n-This module provides authentication for application running on App Engine in\n-the standard environment using the `App Identity API`_.\n+This module provides authentication and signing for applications running on App\n+Engine in the standard environment using the `App Identity API`_.\n \n \n .. _App Identity API:\n@@ -33,6 +33,29 @@\n app_identity = None\n \n \n+class Signer(object):\n+ \"\"\"Signs messages using the App Engine app identity service.\n+\n+ This can be used in place of :class:`google.auth.crypt.Signer` when\n+ running in the App Engine standard environment.\n+ \"\"\"\n+ def __init__(self):\n+ self.key_id = None\n+\n+ @staticmethod\n+ def sign(message):\n+ \"\"\"Signs a message.\n+\n+ Args:\n+ message (Union[str, bytes]): The message to be signed.\n+\n+ Returns:\n+ bytes: The signature of the message.\n+ \"\"\"\n+ message = _helpers.to_bytes(message)\n+ return app_identity.sign_blob(message)\n+\n+\n def get_project_id():\n \"\"\"Gets the project ID for the current App Engine application.\n \n@@ -109,7 +132,7 @@\n \n @_helpers.copy_docstring(credentials.Signing)\n def sign_bytes(self, message):\n- return app_identity.sign_blob(message)\n+ return Signer().sign(message)\n \n @property\n @_helpers.copy_docstring(credentials.Signing)\ndiff --git a/google/auth/crypt.py b/google/auth/crypt.py\n--- a/google/auth/crypt.py\n+++ b/google/auth/crypt.py\n@@ -186,7 +186,7 @@\n message (Union[str, bytes]): The message to be signed.\n \n Returns:\n- bytes: The signature of the message for the given key.\n+ bytes: The signature of the message.\n \"\"\"\n message = _helpers.to_bytes(message)\n return rsa.pkcs1.sign(message, self._key, 'SHA-256')\n", "issue": "Add GAE Signer\n\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Google App Engine standard environment credentials.\n\nThis module provides authentication for application running on App Engine in\nthe standard environment using the `App Identity API`_.\n\n\n.. 
_App Identity API:\n https://cloud.google.com/appengine/docs/python/appidentity/\n\"\"\"\n\nimport datetime\n\nfrom google.auth import _helpers\nfrom google.auth import credentials\n\ntry:\n from google.appengine.api import app_identity\nexcept ImportError:\n app_identity = None\n\n\ndef get_project_id():\n \"\"\"Gets the project ID for the current App Engine application.\n\n Returns:\n str: The project ID\n\n Raises:\n EnvironmentError: If the App Engine APIs are unavailable.\n \"\"\"\n if app_identity is None:\n raise EnvironmentError(\n 'The App Engine APIs are not available.')\n return app_identity.get_application_id()\n\n\nclass Credentials(credentials.Scoped, credentials.Signing,\n credentials.Credentials):\n \"\"\"App Engine standard environment credentials.\n\n These credentials use the App Engine App Identity API to obtain access\n tokens.\n \"\"\"\n\n def __init__(self, scopes=None, service_account_id=None):\n \"\"\"\n Args:\n scopes (Sequence[str]): Scopes to request from the App Identity\n API.\n service_account_id (str): The service account ID passed into\n :func:`google.appengine.api.app_identity.get_access_token`.\n If not specified, the default application service account\n ID will be used.\n\n Raises:\n EnvironmentError: If the App Engine APIs are unavailable.\n \"\"\"\n if app_identity is None:\n raise EnvironmentError(\n 'The App Engine APIs are not available.')\n\n super(Credentials, self).__init__()\n self._scopes = scopes\n self._service_account_id = service_account_id\n\n @_helpers.copy_docstring(credentials.Credentials)\n def refresh(self, request):\n # pylint: disable=unused-argument\n token, ttl = app_identity.get_access_token(\n self._scopes, self._service_account_id)\n expiry = _helpers.utcnow() + datetime.timedelta(seconds=ttl)\n\n self.token, self.expiry = token, expiry\n\n @property\n def service_account_email(self):\n \"\"\"The service account email.\"\"\"\n if self._service_account_id is None:\n self._service_account_id = app_identity.get_service_account_name()\n return self._service_account_id\n\n @property\n def requires_scopes(self):\n \"\"\"Checks if the credentials requires scopes.\n\n Returns:\n bool: True if there are no scopes set otherwise False.\n \"\"\"\n return not self._scopes\n\n @_helpers.copy_docstring(credentials.Scoped)\n def with_scopes(self, scopes):\n return Credentials(\n scopes=scopes, service_account_id=self._service_account_id)\n\n @_helpers.copy_docstring(credentials.Signing)\n def sign_bytes(self, message):\n return app_identity.sign_blob(message)\n\n @property\n @_helpers.copy_docstring(credentials.Signing)\n def signer_email(self):\n return self.service_account_email\n", "path": "google/auth/app_engine.py"}, {"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Cryptography helpers for verifying and signing messages.\n\nUses the ``rsa``, ``pyasn1`` and ``pyasn1_modules`` packages\nto parse PEM files storing PKCS#1 or PKCS#8 keys as well as\ncertificates. 
There is no support for p12 files.\n\nThe simplest way to verify signatures is using :func:`verify_signature`::\n\n cert = open('certs.pem').read()\n valid = crypt.verify_signature(message, signature, cert)\n\nIf you're going to verify many messages with the same certificate, you can use\n:class:`Verifier`::\n\n cert = open('certs.pem').read()\n verifier = crypt.Verifier.from_string(cert)\n valid = verifier.verify(message, signature)\n\n\nTo sign messages use :class:`Signer` with a private key::\n\n private_key = open('private_key.pem').read()\n signer = crypt.Signer(private_key)\n signature = signer.sign(message)\n\n\"\"\"\n\nfrom pyasn1.codec.der import decoder\nfrom pyasn1_modules import pem\nfrom pyasn1_modules.rfc2459 import Certificate\nfrom pyasn1_modules.rfc5208 import PrivateKeyInfo\nimport rsa\nimport six\n\nfrom google.auth import _helpers\n\n_POW2 = (128, 64, 32, 16, 8, 4, 2, 1)\n_CERTIFICATE_MARKER = b'-----BEGIN CERTIFICATE-----'\n_PKCS1_MARKER = ('-----BEGIN RSA PRIVATE KEY-----',\n '-----END RSA PRIVATE KEY-----')\n_PKCS8_MARKER = ('-----BEGIN PRIVATE KEY-----',\n '-----END PRIVATE KEY-----')\n_PKCS8_SPEC = PrivateKeyInfo()\n\n\ndef _bit_list_to_bytes(bit_list):\n \"\"\"Converts an iterable of 1s and 0s to bytes.\n\n Combines the list 8 at a time, treating each group of 8 bits\n as a single byte.\n\n Args:\n bit_list (Sequence): Sequence of 1s and 0s.\n\n Returns:\n bytes: The decoded bytes.\n \"\"\"\n num_bits = len(bit_list)\n byte_vals = bytearray()\n for start in six.moves.xrange(0, num_bits, 8):\n curr_bits = bit_list[start:start + 8]\n char_val = sum(val * digit\n for val, digit in six.moves.zip(_POW2, curr_bits))\n byte_vals.append(char_val)\n return bytes(byte_vals)\n\n\nclass Verifier(object):\n \"\"\"This object is used to verify cryptographic signatures.\n\n Args:\n public_key (rsa.key.PublicKey): The public key used to verify\n signatures.\n \"\"\"\n\n def __init__(self, public_key):\n self._pubkey = public_key\n\n def verify(self, message, signature):\n \"\"\"Verifies a message against a cryptographic signature.\n\n Args:\n message (Union[str, bytes]): The message to verify.\n signature (Union[str, bytes]): The cryptography signature to check.\n\n Returns:\n bool: True if message was signed by the private key associated\n with the public key that this object was constructed with.\n \"\"\"\n message = _helpers.to_bytes(message)\n try:\n return rsa.pkcs1.verify(message, signature, self._pubkey)\n except (ValueError, rsa.pkcs1.VerificationError):\n return False\n\n @classmethod\n def from_string(cls, public_key):\n \"\"\"Construct an Verifier instance from a public key or public\n certificate string.\n\n Args:\n public_key (Union[str, bytes]): The public key in PEM format or the\n x509 public key certificate.\n\n Returns:\n Verifier: The constructed verifier.\n\n Raises:\n ValueError: If the public_key can't be parsed.\n \"\"\"\n public_key = _helpers.to_bytes(public_key)\n is_x509_cert = _CERTIFICATE_MARKER in public_key\n\n # If this is a certificate, extract the public key info.\n if is_x509_cert:\n der = rsa.pem.load_pem(public_key, 'CERTIFICATE')\n asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate())\n if remaining != b'':\n raise ValueError('Unused bytes', remaining)\n\n cert_info = asn1_cert['tbsCertificate']['subjectPublicKeyInfo']\n key_bytes = _bit_list_to_bytes(cert_info['subjectPublicKey'])\n pubkey = rsa.PublicKey.load_pkcs1(key_bytes, 'DER')\n else:\n pubkey = rsa.PublicKey.load_pkcs1(public_key, 'PEM')\n return cls(pubkey)\n\n\ndef 
verify_signature(message, signature, certs):\n \"\"\"Verify a cryptographic signature.\n\n Checks that the provided ``signature`` was generated from ``bytes`` using\n the private key associated with the ``cert``.\n\n Args:\n message (Union[str, bytes]): The plaintext message.\n signature (Union[str, bytes]): The cryptographic signature to check.\n certs (Union[Sequence, str, bytes]): The certificate or certificates\n to use to check the signature.\n\n Returns:\n bool: True if the signature is valid, otherwise False.\n \"\"\"\n if isinstance(certs, (six.text_type, six.binary_type)):\n certs = [certs]\n\n for cert in certs:\n verifier = Verifier.from_string(cert)\n if verifier.verify(message, signature):\n return True\n return False\n\n\nclass Signer(object):\n \"\"\"Signs messages with a private key.\n\n Args:\n private_key (rsa.key.PrivateKey): The private key to sign with.\n key_id (str): Optional key ID used to identify this private key. This\n can be useful to associate the private key with its associated\n public key or certificate.\n \"\"\"\n\n def __init__(self, private_key, key_id=None):\n self._key = private_key\n self.key_id = key_id\n\n def sign(self, message):\n \"\"\"Signs a message.\n\n Args:\n message (Union[str, bytes]): The message to be signed.\n\n Returns:\n bytes: The signature of the message for the given key.\n \"\"\"\n message = _helpers.to_bytes(message)\n return rsa.pkcs1.sign(message, self._key, 'SHA-256')\n\n @classmethod\n def from_string(cls, key, key_id=None):\n \"\"\"Construct an Signer instance from a private key in PEM format.\n\n Args:\n key (str): Private key in PEM format.\n key_id (str): An optional key id used to identify the private key.\n\n Returns:\n Signer: The constructed signer.\n\n Raises:\n ValueError: If the key cannot be parsed as PKCS#1 or PKCS#8 in\n PEM format.\n \"\"\"\n key = _helpers.from_bytes(key) # PEM expects str in Python 3\n marker_id, key_bytes = pem.readPemBlocksFromFile(\n six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)\n\n # Key is in pkcs1 format.\n if marker_id == 0:\n private_key = rsa.key.PrivateKey.load_pkcs1(\n key_bytes, format='DER')\n # Key is in pkcs8.\n elif marker_id == 1:\n key_info, remaining = decoder.decode(\n key_bytes, asn1Spec=_PKCS8_SPEC)\n if remaining != b'':\n raise ValueError('Unused bytes', remaining)\n private_key_info = key_info.getComponentByName('privateKey')\n private_key = rsa.key.PrivateKey.load_pkcs1(\n private_key_info.asOctets(), format='DER')\n else:\n raise ValueError('No key could be detected.')\n\n return cls(private_key, key_id=key_id)\n", "path": "google/auth/crypt.py"}]}
| 3,961 | 518 |
gh_patches_debug_40206
|
rasdani/github-patches
|
git_diff
|
networkx__networkx-3072
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Behavior of `nx.union_all` when `graphs=[]`
I'm running into a case where I'm passing in an empty list to `nx.union_all`, and it returns a None type.
While this is not necessarily the wrong thing to do, it is not documented.
Intuitively, I would expect the result of a union with no inputs to be an empty graph, but the issue here is that you don't know what type the graph should be. Therefore, I think the best behavior would be to raise a ValueError indicating that the input cannot be empty. This would make it clearer where the code is failing.
Current behavior:
```python
>>> nx.union_all([nx.path_graph([1, 2])])
<networkx.classes.graph.Graph at 0x7f6fb15d1ac8>
>>> nx.union_all([nx.path_graph([1, 2]), nx.path_graph([3, 4])])
<networkx.classes.graph.Graph at 0x7f6fb1477ac8>
>>> print(nx.union_all([]))
None
```
Proposed Behavior:
```python
>>> print(nx.union_all([]))
ValueError: Cannot union_all an empty list
```
Behavior of `nx.union_all` when `graphs=[]`
I'm running into a case where I'm passing in an empty list to `nx.union_all`, and it returns a None type.
While this is not necessarily the wrong thing to do, it is not documented.
Intuitively, I would expect the result of a union with no inputs to be an empty graph, but the issue here is that you don't know what type the graph should be. Therefore, I think the best behavior would be to raise a ValueError indicating that the input cannot be empty. This would make it clearer where the code is failing.
Current behavior:
```python
>>> nx.union_all([nx.path_graph([1, 2])])
<networkx.classes.graph.Graph at 0x7f6fb15d1ac8>
>>> nx.union_all([nx.path_graph([1, 2]), nx.path_graph([3, 4])])
<networkx.classes.graph.Graph at 0x7f6fb1477ac8>
>>> print(nx.union_all([]))
None
```
Proposed Behavior:
```python
>>> print(nx.union_all([]))
ValueError: Cannot union_all an empty list
```
</issue>
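The requested change boils down to a guard clause before the first `next(...)` call in each `*_all` helper. As a rough illustration of the behavior being asked for, a caller-side wrapper can enforce it today (the `union_all_guarded` name is invented here and is not part of networkx):

```python
import networkx as nx

def union_all_guarded(graphs, rename=(None,)):
    # Fail loudly on empty input instead of silently returning None.
    if not graphs:
        raise ValueError("cannot apply union_all to an empty list")
    return nx.union_all(graphs, rename=rename)

print(union_all_guarded([nx.path_graph([1, 2]), nx.path_graph([3, 4])]))
try:
    union_all_guarded([])
except ValueError as err:
    print(err)  # cannot apply union_all to an empty list
```

The same guard applies equally to `disjoint_union_all`, `compose_all`, and `intersection_all`, which share the "take the first graph, fold in the rest" structure.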
<code>
[start of networkx/algorithms/operators/all.py]
1 """Operations on many graphs.
2 """
3 # Copyright (C) 2013 by
4 # Aric Hagberg <[email protected]>
5 # Dan Schult <[email protected]>
6 # Pieter Swart <[email protected]>
7 # All rights reserved.
8 # BSD license.
9 try:
10 from itertools import izip_longest as zip_longest
11 except ImportError: # Python3 has zip_longest
12 from itertools import zip_longest
13 import networkx as nx
14
15 __author__ = """\n""".join(['Robert King <[email protected]>',
16 'Aric Hagberg <[email protected]>'])
17
18 __all__ = ['union_all', 'compose_all', 'disjoint_union_all',
19 'intersection_all']
20
21
22 def union_all(graphs, rename=(None,)):
23 """Return the union of all graphs.
24
25 The graphs must be disjoint, otherwise an exception is raised.
26
27 Parameters
28 ----------
29 graphs : list of graphs
30 List of NetworkX graphs
31
32 rename : bool , default=(None, None)
33 Node names of G and H can be changed by specifying the tuple
34 rename=('G-','H-') (for example). Node "u" in G is then renamed
35 "G-u" and "v" in H is renamed "H-v".
36
37 Returns
38 -------
39 U : a graph with the same type as the first graph in list
40
41 Notes
42 -----
43 To force a disjoint union with node relabeling, use
44 disjoint_union_all(G,H) or convert_node_labels_to integers().
45
46 Graph, edge, and node attributes are propagated to the union graph.
47 If a graph attribute is present in multiple graphs, then the value
48 from the last graph in the list with that attribute is used.
49
50 See Also
51 --------
52 union
53 disjoint_union_all
54 """
55 graphs_names = zip_longest(graphs, rename)
56 U, gname = next(graphs_names)
57 for H, hname in graphs_names:
58 U = nx.union(U, H, (gname, hname))
59 gname = None
60 return U
61
62
63 def disjoint_union_all(graphs):
64 """Return the disjoint union of all graphs.
65
66 This operation forces distinct integer node labels starting with 0
67 for the first graph in the list and numbering consecutively.
68
69 Parameters
70 ----------
71 graphs : list
72 List of NetworkX graphs
73
74 Returns
75 -------
76 U : A graph with the same type as the first graph in list
77
78 Notes
79 -----
80 It is recommended that the graphs be either all directed or all undirected.
81
82 Graph, edge, and node attributes are propagated to the union graph.
83 If a graph attribute is present in multiple graphs, then the value
84 from the last graph in the list with that attribute is used.
85 """
86 graphs = iter(graphs)
87 U = next(graphs)
88 for H in graphs:
89 U = nx.disjoint_union(U, H)
90 return U
91
92
93 def compose_all(graphs):
94 """Return the composition of all graphs.
95
96 Composition is the simple union of the node sets and edge sets.
97 The node sets of the supplied graphs need not be disjoint.
98
99 Parameters
100 ----------
101 graphs : list
102 List of NetworkX graphs
103
104 Returns
105 -------
106 C : A graph with the same type as the first graph in list
107
108 Notes
109 -----
110 It is recommended that the supplied graphs be either all directed or all
111 undirected.
112
113 Graph, edge, and node attributes are propagated to the union graph.
114 If a graph attribute is present in multiple graphs, then the value
115 from the last graph in the list with that attribute is used.
116 """
117 graphs = iter(graphs)
118 C = next(graphs)
119 for H in graphs:
120 C = nx.compose(C, H)
121 return C
122
123
124 def intersection_all(graphs):
125 """Return a new graph that contains only the edges that exist in
126 all graphs.
127
128 All supplied graphs must have the same node set.
129
130 Parameters
131 ----------
132 graphs : list
133 List of NetworkX graphs
134
135 Returns
136 -------
137 R : A new graph with the same type as the first graph in list
138
139 Notes
140 -----
141 Attributes from the graph, nodes, and edges are not copied to the new
142 graph.
143 """
144 graphs = iter(graphs)
145 R = next(graphs)
146 for H in graphs:
147 R = nx.intersection(R, H)
148 return R
149
[end of networkx/algorithms/operators/all.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/networkx/algorithms/operators/all.py b/networkx/algorithms/operators/all.py
--- a/networkx/algorithms/operators/all.py
+++ b/networkx/algorithms/operators/all.py
@@ -38,6 +38,11 @@
-------
U : a graph with the same type as the first graph in list
+ Raises
+ ------
+ ValueError
+ If `graphs` is an empty list.
+
Notes
-----
To force a disjoint union with node relabeling, use
@@ -52,6 +57,8 @@
union
disjoint_union_all
"""
+ if not graphs:
+ raise ValueError('cannot apply union_all to an empty list')
graphs_names = zip_longest(graphs, rename)
U, gname = next(graphs_names)
for H, hname in graphs_names:
@@ -75,6 +82,11 @@
-------
U : A graph with the same type as the first graph in list
+ Raises
+ ------
+ ValueError
+ If `graphs` is an empty list.
+
Notes
-----
It is recommended that the graphs be either all directed or all undirected.
@@ -83,6 +95,8 @@
If a graph attribute is present in multiple graphs, then the value
from the last graph in the list with that attribute is used.
"""
+ if not graphs:
+ raise ValueError('cannot apply disjoint_union_all to an empty list')
graphs = iter(graphs)
U = next(graphs)
for H in graphs:
@@ -105,6 +119,11 @@
-------
C : A graph with the same type as the first graph in list
+ Raises
+ ------
+ ValueError
+ If `graphs` is an empty list.
+
Notes
-----
It is recommended that the supplied graphs be either all directed or all
@@ -114,6 +133,8 @@
If a graph attribute is present in multiple graphs, then the value
from the last graph in the list with that attribute is used.
"""
+ if not graphs:
+ raise ValueError('cannot apply compose_all to an empty list')
graphs = iter(graphs)
C = next(graphs)
for H in graphs:
@@ -136,11 +157,18 @@
-------
R : A new graph with the same type as the first graph in list
+ Raises
+ ------
+ ValueError
+ If `graphs` is an empty list.
+
Notes
-----
Attributes from the graph, nodes, and edges are not copied to the new
graph.
"""
+ if not graphs:
+ raise ValueError('cannot apply intersection_all to an empty list')
graphs = iter(graphs)
R = next(graphs)
for H in graphs:
|
{"golden_diff": "diff --git a/networkx/algorithms/operators/all.py b/networkx/algorithms/operators/all.py\n--- a/networkx/algorithms/operators/all.py\n+++ b/networkx/algorithms/operators/all.py\n@@ -38,6 +38,11 @@\n -------\n U : a graph with the same type as the first graph in list\n \n+ Raises\n+ ------\n+ ValueError\n+ If `graphs` is an empty list.\n+\n Notes\n -----\n To force a disjoint union with node relabeling, use\n@@ -52,6 +57,8 @@\n union\n disjoint_union_all\n \"\"\"\n+ if not graphs:\n+ raise ValueError('cannot apply union_all to an empty list')\n graphs_names = zip_longest(graphs, rename)\n U, gname = next(graphs_names)\n for H, hname in graphs_names:\n@@ -75,6 +82,11 @@\n -------\n U : A graph with the same type as the first graph in list\n \n+ Raises\n+ ------\n+ ValueError\n+ If `graphs` is an empty list.\n+\n Notes\n -----\n It is recommended that the graphs be either all directed or all undirected.\n@@ -83,6 +95,8 @@\n If a graph attribute is present in multiple graphs, then the value\n from the last graph in the list with that attribute is used.\n \"\"\"\n+ if not graphs:\n+ raise ValueError('cannot apply disjoint_union_all to an empty list')\n graphs = iter(graphs)\n U = next(graphs)\n for H in graphs:\n@@ -105,6 +119,11 @@\n -------\n C : A graph with the same type as the first graph in list\n \n+ Raises\n+ ------\n+ ValueError\n+ If `graphs` is an empty list.\n+\n Notes\n -----\n It is recommended that the supplied graphs be either all directed or all\n@@ -114,6 +133,8 @@\n If a graph attribute is present in multiple graphs, then the value\n from the last graph in the list with that attribute is used.\n \"\"\"\n+ if not graphs:\n+ raise ValueError('cannot apply compose_all to an empty list')\n graphs = iter(graphs)\n C = next(graphs)\n for H in graphs:\n@@ -136,11 +157,18 @@\n -------\n R : A new graph with the same type as the first graph in list\n \n+ Raises\n+ ------\n+ ValueError\n+ If `graphs` is an empty list.\n+\n Notes\n -----\n Attributes from the graph, nodes, and edges are not copied to the new\n graph.\n \"\"\"\n+ if not graphs:\n+ raise ValueError('cannot apply intersection_all to an empty list')\n graphs = iter(graphs)\n R = next(graphs)\n for H in graphs:\n", "issue": "Behavior of `nx.union_all` when `graphs=[]`\nI'm running into a case where I'm passing in an empty list to `nx.union_all`, and it returns a None type. \r\n\r\nWhile this is not necessarily the wrong thing to do, it is not documented.\r\n\r\nIntuitively, I would expect the result of union with no inputs to be an empty graph, but the issue here is that you don't know what type the graph should be. Therefore I think the best behavior would be to raise a ValueError indicating that the input cannot be empty. This would make it more clear where the code is failing. \r\n\r\nCurrent behavior:\r\n\r\n```python\r\n\r\n>>> nx.union_all([nx.path_graph([1, 2])])\r\n<networkx.classes.graph.Graph at 0x7f6fb15d1ac8>\r\n\r\n>>> nx.union_all([nx.path_graph([1, 2]), nx.path_graph([3, 4])])\r\n<networkx.classes.graph.Graph at 0x7f6fb1477ac8>\r\n\r\n>>> print(nx.union_all([]))\r\nNone\r\n\r\n```\r\n\r\nProposed Behavior:\r\n\r\n```python\r\n>>> print(nx.union_all([]))\r\nValueError: Cannot union_all an empty list\r\n```\nBehavior of `nx.union_all` when `graphs=[]`\nI'm running into a case where I'm passing in an empty list to `nx.union_all`, and it returns a None type. 
\r\n\r\nWhile this is not necessarily the wrong thing to do, it is not documented.\r\n\r\nIntuitively, I would expect the result of union with no inputs to be an empty graph, but the issue here is that you don't know what type the graph should be. Therefore I think the best behavior would be to raise a ValueError indicating that the input cannot be empty. This would make it more clear where the code is failing. \r\n\r\nCurrent behavior:\r\n\r\n```python\r\n\r\n>>> nx.union_all([nx.path_graph([1, 2])])\r\n<networkx.classes.graph.Graph at 0x7f6fb15d1ac8>\r\n\r\n>>> nx.union_all([nx.path_graph([1, 2]), nx.path_graph([3, 4])])\r\n<networkx.classes.graph.Graph at 0x7f6fb1477ac8>\r\n\r\n>>> print(nx.union_all([]))\r\nNone\r\n\r\n```\r\n\r\nProposed Behavior:\r\n\r\n```python\r\n>>> print(nx.union_all([]))\r\nValueError: Cannot union_all an empty list\r\n```\n", "before_files": [{"content": "\"\"\"Operations on many graphs.\n\"\"\"\n# Copyright (C) 2013 by\n# Aric Hagberg <[email protected]>\n# Dan Schult <[email protected]>\n# Pieter Swart <[email protected]>\n# All rights reserved.\n# BSD license.\ntry:\n from itertools import izip_longest as zip_longest\nexcept ImportError: # Python3 has zip_longest\n from itertools import zip_longest\nimport networkx as nx\n\n__author__ = \"\"\"\\n\"\"\".join(['Robert King <[email protected]>',\n 'Aric Hagberg <[email protected]>'])\n\n__all__ = ['union_all', 'compose_all', 'disjoint_union_all',\n 'intersection_all']\n\n\ndef union_all(graphs, rename=(None,)):\n \"\"\"Return the union of all graphs.\n\n The graphs must be disjoint, otherwise an exception is raised.\n\n Parameters\n ----------\n graphs : list of graphs\n List of NetworkX graphs\n\n rename : bool , default=(None, None)\n Node names of G and H can be changed by specifying the tuple\n rename=('G-','H-') (for example). 
Node \"u\" in G is then renamed\n \"G-u\" and \"v\" in H is renamed \"H-v\".\n\n Returns\n -------\n U : a graph with the same type as the first graph in list\n\n Notes\n -----\n To force a disjoint union with node relabeling, use\n disjoint_union_all(G,H) or convert_node_labels_to integers().\n\n Graph, edge, and node attributes are propagated to the union graph.\n If a graph attribute is present in multiple graphs, then the value\n from the last graph in the list with that attribute is used.\n\n See Also\n --------\n union\n disjoint_union_all\n \"\"\"\n graphs_names = zip_longest(graphs, rename)\n U, gname = next(graphs_names)\n for H, hname in graphs_names:\n U = nx.union(U, H, (gname, hname))\n gname = None\n return U\n\n\ndef disjoint_union_all(graphs):\n \"\"\"Return the disjoint union of all graphs.\n\n This operation forces distinct integer node labels starting with 0\n for the first graph in the list and numbering consecutively.\n\n Parameters\n ----------\n graphs : list\n List of NetworkX graphs\n\n Returns\n -------\n U : A graph with the same type as the first graph in list\n\n Notes\n -----\n It is recommended that the graphs be either all directed or all undirected.\n\n Graph, edge, and node attributes are propagated to the union graph.\n If a graph attribute is present in multiple graphs, then the value\n from the last graph in the list with that attribute is used.\n \"\"\"\n graphs = iter(graphs)\n U = next(graphs)\n for H in graphs:\n U = nx.disjoint_union(U, H)\n return U\n\n\ndef compose_all(graphs):\n \"\"\"Return the composition of all graphs.\n\n Composition is the simple union of the node sets and edge sets.\n The node sets of the supplied graphs need not be disjoint.\n\n Parameters\n ----------\n graphs : list\n List of NetworkX graphs\n\n Returns\n -------\n C : A graph with the same type as the first graph in list\n\n Notes\n -----\n It is recommended that the supplied graphs be either all directed or all\n undirected.\n\n Graph, edge, and node attributes are propagated to the union graph.\n If a graph attribute is present in multiple graphs, then the value\n from the last graph in the list with that attribute is used.\n \"\"\"\n graphs = iter(graphs)\n C = next(graphs)\n for H in graphs:\n C = nx.compose(C, H)\n return C\n\n\ndef intersection_all(graphs):\n \"\"\"Return a new graph that contains only the edges that exist in\n all graphs.\n\n All supplied graphs must have the same node set.\n\n Parameters\n ----------\n graphs : list\n List of NetworkX graphs\n\n Returns\n -------\n R : A new graph with the same type as the first graph in list\n\n Notes\n -----\n Attributes from the graph, nodes, and edges are not copied to the new\n graph.\n \"\"\"\n graphs = iter(graphs)\n R = next(graphs)\n for H in graphs:\n R = nx.intersection(R, H)\n return R\n", "path": "networkx/algorithms/operators/all.py"}]}
| 2,377 | 637 |
gh_patches_debug_14403
|
rasdani/github-patches
|
git_diff
|
dbt-labs__dbt-core-4359
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ensure that all dbt invocations have an invocation_id
This is a sub-issue of #4260.
In order to make sure we can distinguish logs from different runs, we should always have an invocation_id. Today it is only created when tracking is on, so we would need to create it ourselves when tracking is off (to add it to logging, add it as a class property on the superclass). Alternatives previously considered: preserving the process id as today, reporting a global run_id at start, or allowing some sort of user markers.
</issue>
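The mechanism being asked for is a process-wide identifier that exists whether or not tracking is enabled. A minimal sketch of that pattern using plain module-level state (the function names mirror the ones used in the patch below, but the bodies here are illustrative rather than dbt's actual implementation):

```python
import uuid

_INVOCATION_ID = None

def get_invocation_id() -> str:
    """Return the current invocation id, creating one lazily on first use."""
    global _INVOCATION_ID
    if _INVOCATION_ID is None:
        _INVOCATION_ID = str(uuid.uuid4())
    return _INVOCATION_ID

def set_invocation_id() -> None:
    """Start a fresh invocation id, e.g. at the beginning of each run."""
    global _INVOCATION_ID
    _INVOCATION_ID = str(uuid.uuid4())

set_invocation_id()
print(get_invocation_id())  # same value for every log line in this process
```

Every log record can then attach `get_invocation_id()`, so runs stay distinguishable even when tracking is off.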
<code>
[start of core/dbt/lib.py]
1 # TODO: this file is one big TODO
2 import os
3 from dbt.exceptions import RuntimeException
4 from dbt import flags
5 from collections import namedtuple
6
7 RuntimeArgs = namedtuple(
8 'RuntimeArgs', 'project_dir profiles_dir single_threaded profile_name'
9 )
10
11
12 def get_dbt_config(project_dir, single_threaded=False):
13 from dbt.config.runtime import RuntimeConfig
14 import dbt.adapters.factory
15
16 if os.getenv('DBT_PROFILES_DIR'):
17 profiles_dir = os.getenv('DBT_PROFILES_DIR')
18 else:
19 profiles_dir = os.path.expanduser("~/.dbt")
20
21 # Construct a phony config
22 config = RuntimeConfig.from_args(RuntimeArgs(
23 project_dir, profiles_dir, single_threaded, 'user'
24 ))
25 # Clear previously registered adapters--
26 # this fixes cacheing behavior on the dbt-server
27 flags.set_from_args('', config)
28 dbt.adapters.factory.reset_adapters()
29 # Load the relevant adapter
30 dbt.adapters.factory.register_adapter(config)
31
32 return config
33
34
35 def get_task_by_type(type):
36 # TODO: we need to tell dbt-server what tasks are available
37 from dbt.task.run import RunTask
38 from dbt.task.list import ListTask
39 from dbt.task.seed import SeedTask
40 from dbt.task.test import TestTask
41 from dbt.task.build import BuildTask
42 from dbt.task.snapshot import SnapshotTask
43 from dbt.task.run_operation import RunOperationTask
44
45 if type == 'run':
46 return RunTask
47 elif type == 'test':
48 return TestTask
49 elif type == 'list':
50 return ListTask
51 elif type == 'seed':
52 return SeedTask
53 elif type == 'build':
54 return BuildTask
55 elif type == 'snapshot':
56 return SnapshotTask
57 elif type == 'run_operation':
58 return RunOperationTask
59
60 raise RuntimeException('not a valid task')
61
62
63 def create_task(type, args, manifest, config):
64 task = get_task_by_type(type)
65
66 def no_op(*args, **kwargs):
67 pass
68
69 # TODO: yuck, let's rethink tasks a little
70 task = task(args, config)
71
72 # Wow! We can monkeypatch taskCls.load_manifest to return _our_ manifest
73 task.load_manifest = no_op
74 task.manifest = manifest
75 return task
76
77
78 def _get_operation_node(manifest, project_path, sql):
79 from dbt.parser.manifest import process_node
80 from dbt.parser.sql import SqlBlockParser
81 import dbt.adapters.factory
82
83 config = get_dbt_config(project_path)
84 block_parser = SqlBlockParser(
85 project=config,
86 manifest=manifest,
87 root_project=config,
88 )
89
90 adapter = dbt.adapters.factory.get_adapter(config)
91 # TODO : This needs a real name?
92 sql_node = block_parser.parse_remote(sql, 'name')
93 process_node(config, manifest, sql_node)
94 return config, sql_node, adapter
95
96
97 def compile_sql(manifest, project_path, sql):
98 from dbt.task.sql import SqlCompileRunner
99
100 config, node, adapter = _get_operation_node(manifest, project_path, sql)
101 runner = SqlCompileRunner(config, adapter, node, 1, 1)
102 return runner.safe_run(manifest)
103
104
105 def execute_sql(manifest, project_path, sql):
106 from dbt.task.sql import SqlExecuteRunner
107
108 config, node, adapter = _get_operation_node(manifest, project_path, sql)
109 runner = SqlExecuteRunner(config, adapter, node, 1, 1)
110 # TODO: use same interface for runner
111 return runner.safe_run(manifest)
112
113
114 def parse_to_manifest(config):
115 from dbt.parser.manifest import ManifestLoader
116
117 return ManifestLoader.get_full_manifest(config)
118
119
120 def deserialize_manifest(manifest_msgpack):
121 from dbt.contracts.graph.manifest import Manifest
122
123 return Manifest.from_msgpack(manifest_msgpack)
124
125
126 def serialize_manifest(manifest):
127 # TODO: what should this take as an arg?
128 return manifest.to_msgpack()
129
[end of core/dbt/lib.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/core/dbt/lib.py b/core/dbt/lib.py
--- a/core/dbt/lib.py
+++ b/core/dbt/lib.py
@@ -12,6 +12,7 @@
def get_dbt_config(project_dir, single_threaded=False):
from dbt.config.runtime import RuntimeConfig
import dbt.adapters.factory
+ import dbt.events.functions
if os.getenv('DBT_PROFILES_DIR'):
profiles_dir = os.getenv('DBT_PROFILES_DIR')
@@ -28,6 +29,8 @@
dbt.adapters.factory.reset_adapters()
# Load the relevant adapter
dbt.adapters.factory.register_adapter(config)
+ # Set invocation id
+ dbt.events.functions.set_invocation_id()
return config
|
{"golden_diff": "diff --git a/core/dbt/lib.py b/core/dbt/lib.py\n--- a/core/dbt/lib.py\n+++ b/core/dbt/lib.py\n@@ -12,6 +12,7 @@\n def get_dbt_config(project_dir, single_threaded=False):\n from dbt.config.runtime import RuntimeConfig\n import dbt.adapters.factory\n+ import dbt.events.functions\n \n if os.getenv('DBT_PROFILES_DIR'):\n profiles_dir = os.getenv('DBT_PROFILES_DIR')\n@@ -28,6 +29,8 @@\n dbt.adapters.factory.reset_adapters()\n # Load the relevant adapter\n dbt.adapters.factory.register_adapter(config)\n+ # Set invocation id\n+ dbt.events.functions.set_invocation_id()\n \n return config\n", "issue": "Ensure that all dbt invocations have an invocation_id\nThis is a sub-issue of #4260. \n\nIn order to make sure we can distinguish logs from different runs we should always have an invocation_id. If tracking is off, it isn't created today so we would need to create it. (to add it to logging, add it as a class property on the superclass) (previously considered: preserve process id like it is today, global run_id reported at start, or allow some sort of user markers).\n", "before_files": [{"content": "# TODO: this file is one big TODO\nimport os\nfrom dbt.exceptions import RuntimeException\nfrom dbt import flags\nfrom collections import namedtuple\n\nRuntimeArgs = namedtuple(\n 'RuntimeArgs', 'project_dir profiles_dir single_threaded profile_name'\n)\n\n\ndef get_dbt_config(project_dir, single_threaded=False):\n from dbt.config.runtime import RuntimeConfig\n import dbt.adapters.factory\n\n if os.getenv('DBT_PROFILES_DIR'):\n profiles_dir = os.getenv('DBT_PROFILES_DIR')\n else:\n profiles_dir = os.path.expanduser(\"~/.dbt\")\n\n # Construct a phony config\n config = RuntimeConfig.from_args(RuntimeArgs(\n project_dir, profiles_dir, single_threaded, 'user'\n ))\n # Clear previously registered adapters--\n # this fixes cacheing behavior on the dbt-server\n flags.set_from_args('', config)\n dbt.adapters.factory.reset_adapters()\n # Load the relevant adapter\n dbt.adapters.factory.register_adapter(config)\n\n return config\n\n\ndef get_task_by_type(type):\n # TODO: we need to tell dbt-server what tasks are available\n from dbt.task.run import RunTask\n from dbt.task.list import ListTask\n from dbt.task.seed import SeedTask\n from dbt.task.test import TestTask\n from dbt.task.build import BuildTask\n from dbt.task.snapshot import SnapshotTask\n from dbt.task.run_operation import RunOperationTask\n\n if type == 'run':\n return RunTask\n elif type == 'test':\n return TestTask\n elif type == 'list':\n return ListTask\n elif type == 'seed':\n return SeedTask\n elif type == 'build':\n return BuildTask\n elif type == 'snapshot':\n return SnapshotTask\n elif type == 'run_operation':\n return RunOperationTask\n\n raise RuntimeException('not a valid task')\n\n\ndef create_task(type, args, manifest, config):\n task = get_task_by_type(type)\n\n def no_op(*args, **kwargs):\n pass\n\n # TODO: yuck, let's rethink tasks a little\n task = task(args, config)\n\n # Wow! 
We can monkeypatch taskCls.load_manifest to return _our_ manifest\n task.load_manifest = no_op\n task.manifest = manifest\n return task\n\n\ndef _get_operation_node(manifest, project_path, sql):\n from dbt.parser.manifest import process_node\n from dbt.parser.sql import SqlBlockParser\n import dbt.adapters.factory\n\n config = get_dbt_config(project_path)\n block_parser = SqlBlockParser(\n project=config,\n manifest=manifest,\n root_project=config,\n )\n\n adapter = dbt.adapters.factory.get_adapter(config)\n # TODO : This needs a real name?\n sql_node = block_parser.parse_remote(sql, 'name')\n process_node(config, manifest, sql_node)\n return config, sql_node, adapter\n\n\ndef compile_sql(manifest, project_path, sql):\n from dbt.task.sql import SqlCompileRunner\n\n config, node, adapter = _get_operation_node(manifest, project_path, sql)\n runner = SqlCompileRunner(config, adapter, node, 1, 1)\n return runner.safe_run(manifest)\n\n\ndef execute_sql(manifest, project_path, sql):\n from dbt.task.sql import SqlExecuteRunner\n\n config, node, adapter = _get_operation_node(manifest, project_path, sql)\n runner = SqlExecuteRunner(config, adapter, node, 1, 1)\n # TODO: use same interface for runner\n return runner.safe_run(manifest)\n\n\ndef parse_to_manifest(config):\n from dbt.parser.manifest import ManifestLoader\n\n return ManifestLoader.get_full_manifest(config)\n\n\ndef deserialize_manifest(manifest_msgpack):\n from dbt.contracts.graph.manifest import Manifest\n\n return Manifest.from_msgpack(manifest_msgpack)\n\n\ndef serialize_manifest(manifest):\n # TODO: what should this take as an arg?\n return manifest.to_msgpack()\n", "path": "core/dbt/lib.py"}]}
| 1,807 | 166 |
gh_patches_debug_57288
|
rasdani/github-patches
|
git_diff
|
microsoft__DeepSpeed-5134
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] TypeError: Can't instantiate abstract class MPS_Accelerator with abstract methods
**Describe the bug**
TypeError: Can't instantiate abstract class MPS_Accelerator with abstract methods handles_memory_backpressure, resolves_data_dependency, use_host_timers
**To Reproduce**
Steps to reproduce the behavior:
1. git clone https://github.com/OpenBMB/MiniCPM.git
2. follow setup step.
3. run `!bash lora_finetune.sh` via `lora_finetune.ipynb`
**Expected behavior**
runnable
**ds_report output**
Please run `ds_report` to give us details about your setup.
**Screenshots**
```
class CudaEventTimer(object):
File "/Users/katopz/git/MiniCPM/finetune/.venv/lib/python3.11/site-packages/deepspeed/utils/timer.py", line 33, in CudaEventTimer
def __init__(self, start_event: get_accelerator().Event, end_event: get_accelerator().Event):
^^^^^^^^^^^^^^^^^
File "/Users/katopz/git/MiniCPM/finetune/.venv/lib/python3.11/site-packages/deepspeed/accelerator/real_accelerator.py", line 184, in get_accelerator
ds_accelerator = MPS_Accelerator()
^^^^^^^^^^^^^^^^^
TypeError: Can't instantiate abstract class MPS_Accelerator with abstract methods handles_memory_backpressure, resolves_data_dependency, use_host_timers
```
**System info (please complete the following information):**
- OS: macOS 14.2.1 (23C71)
- metal
- Python 3.11.7
**Launcher context**
deepspeed
**Docker context**
no
**Additional context**
</issue>
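The TypeError is standard `abc` behaviour: the accelerator interface gained three abstract methods (`use_host_timers`, `resolves_data_dependency`, `handles_memory_backpressure`) that the MPS backend does not override, so Python refuses to instantiate it. A self-contained sketch of the mechanism and of the delegation-style fix (class names below are stand-ins, not DeepSpeed's real classes, and whether delegating to `is_synchronized_device()` is the right semantics for MPS is an assumption):

```python
from abc import ABC, abstractmethod

class AcceleratorBase(ABC):
    @abstractmethod
    def use_host_timers(self): ...

    @abstractmethod
    def resolves_data_dependency(self): ...

    @abstractmethod
    def handles_memory_backpressure(self): ...

class MPSLikeAccelerator(AcceleratorBase):
    def is_synchronized_device(self):
        return False

    # Without the three overrides below, MPSLikeAccelerator() raises the same
    # "Can't instantiate abstract class" TypeError shown in the traceback.
    def use_host_timers(self):
        return self.is_synchronized_device()

    def resolves_data_dependency(self):
        return self.is_synchronized_device()

    def handles_memory_backpressure(self):
        return self.is_synchronized_device()

print(MPSLikeAccelerator().use_host_timers())  # False
```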
<code>
[start of accelerator/mps_accelerator.py]
1 # Copyright (c) Microsoft Corporation.
2 # SPDX-License-Identifier: Apache-2.0
3
4 # DeepSpeed Team
5
6 import torch
7
8 from .abstract_accelerator import DeepSpeedAccelerator
9
10 # During setup stage torch may not be installed, pass on no torch will
11 # allow op builder related API to be executed.
12 try:
13 import torch.mps
14 except ImportError:
15 pass
16
17
18 class MPS_Accelerator(DeepSpeedAccelerator):
19
20 def __init__(self):
21 self._name = "mps"
22 self._communication_backend_name = None
23
24 def is_synchronized_device(self):
25 return False
26
27 # Device APIs
28 def device_name(self, device_index=None):
29 if device_index is None:
30 return "mps"
31 return "mps:{}".format(device_index)
32
33 def device(self, device_index):
34 return torch.device("mps", index=0)
35
36 def set_device(self, device_index):
37 return
38
39 def current_device(self):
40 return torch.device("mps", index=0)
41
42 def current_device_name(self):
43 return "mps:0"
44
45 def device_count(self):
46 return 1
47
48 def synchronize(self, device_index=None):
49 return torch.mps.synchronize()
50
51 # RNG APIs
52 def random(self):
53 return torch.random
54
55 def set_rng_state(self, new_state, device_index=None):
56 return torch.mps.set_rng_state(new_state)
57
58 def get_rng_state(self, device_index=None):
59 return torch.mps.get_rng_state()
60
61 def manual_seed(self, seed):
62 return torch.mps.manual_seed(seed)
63
64 def manual_seed_all(self, seed):
65 return torch.mps.manual_seed(seed)
66
67 def seed(self):
68 return torch.mps.seed()
69
70 def initial_seed(self, seed):
71 return
72
73 def default_generator(self, device_index):
74 return
75
76 # Streams/Events
77 @property
78 def Stream(self):
79 return None
80
81 def stream(self, stream):
82 return None
83
84 def current_stream(self, device_index=None):
85 return None
86
87 def default_stream(self, device_index=None):
88 return None
89
90 @property
91 def Event(self):
92 return None
93
94 # Memory management
95 def empty_cache(self):
96 return torch.mps.empty_cache()
97
98 def memory_allocated(self, device_index=None):
99 return torch.mps.current_allocated_memory()
100
101 def max_memory_allocated(self, device_index=None):
102 return torch.mps.driver_allocated_memory()
103
104 def set_per_process_memory_fraction(self, fraction):
105 return torch.mps.set_per_process_memory_fraction(fraction)
106
107 def reset_max_memory_allocated(self, device_index=None):
108 return
109
110 def memory_cached(self, device_index=None):
111 return
112
113 def max_memory_cached(self, device_index=None):
114 return
115
116 def reset_max_memory_cached(self, device_index=None):
117 return
118
119 def memory_stats(self, device_index=None):
120 return
121
122 def reset_peak_memory_stats(self, device_index=None):
123 return
124
125 def memory_reserved(self, device_index=None):
126 return
127
128 def max_memory_reserved(self, device_index=None):
129 return
130
131 def total_memory(self, device_index=None):
132 return
133
134 def available_memory(self, device_index=None):
135 return
136
137 # Data types
138 def is_bf16_supported(self):
139 return False
140
141 def is_fp16_supported(self):
142 return False
143
144 def supported_dtypes(self):
145 return [torch.float]
146
147 # Misc
148 def amp(self):
149 return
150
151 def is_available(self):
152 return hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
153
154 def range_push(self, msg):
155 return
156
157 def range_pop(self):
158 return
159
160 def lazy_call(self, callback):
161 return
162
163 def communication_backend_name(self):
164 return self._communication_backend_name
165
166 def is_triton_supported(self):
167 return False
168
169 # Graph operations
170 def create_graph(self):
171 return None
172
173 def capture_to_graph(self, graph, pool=None, stream=None):
174 from deepspeed.runtime.utils import noop_context
175 return noop_context()
176
177 def replay_graph(self, graph):
178 return
179
180 # Tensor operations
181 @property
182 def BFloat16Tensor(self):
183 return
184
185 @property
186 def ByteTensor(self):
187 return
188
189 @property
190 def DoubleTensor(self):
191 return
192
193 @property
194 def FloatTensor(self):
195 return
196
197 @property
198 def HalfTensor(self):
199 return
200
201 @property
202 def IntTensor(self):
203 return
204
205 @property
206 def LongTensor(self):
207 return
208
209 def pin_memory(self, tensor, align_bytes=1):
210 return tensor.pin_memory()
211
212 def is_pinned(self, tensor):
213 return tensor.is_pinned()
214
215 def on_accelerator(self, tensor):
216 device_str = str(tensor.device)
217 if device_str.startswith("mps"):
218 return True
219 else:
220 return False
221
222 def op_builder_dir(self):
223 try:
224 # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
225 # if successful this also means we're doing a local install and not JIT compile path
226 from op_builder import __deepspeed__ # noqa: F401 # type: ignore
227
228 return "op_builder"
229 except ImportError:
230 return "deepspeed.ops.op_builder"
231
232 # create an instance of op builder, specified by class_name
233 def create_op_builder(self, op_name):
234 builder_class = self.get_op_builder(op_name)
235 if builder_class is not None:
236 return builder_class()
237 return None
238
239 # return an op builder class, specified by class_name
240 def get_op_builder(self, class_name):
241 from deepspeed.ops.op_builder.cpu import NotImplementedBuilder
242
243 return NotImplementedBuilder
244
245 def build_extension(self):
246 from torch.utils.cpp_extension import BuildExtension
247
248 return BuildExtension
249
250 def export_envs(self):
251 return []
252
[end of accelerator/mps_accelerator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/accelerator/mps_accelerator.py b/accelerator/mps_accelerator.py
--- a/accelerator/mps_accelerator.py
+++ b/accelerator/mps_accelerator.py
@@ -24,6 +24,15 @@
def is_synchronized_device(self):
return False
+ def use_host_timers(self):
+ return self.is_synchronized_device()
+
+ def resolves_data_dependency(self):
+ return self.is_synchronized_device()
+
+ def handles_memory_backpressure(self):
+ return self.is_synchronized_device()
+
# Device APIs
def device_name(self, device_index=None):
if device_index is None:
|
{"golden_diff": "diff --git a/accelerator/mps_accelerator.py b/accelerator/mps_accelerator.py\n--- a/accelerator/mps_accelerator.py\n+++ b/accelerator/mps_accelerator.py\n@@ -24,6 +24,15 @@\n def is_synchronized_device(self):\n return False\n \n+ def use_host_timers(self):\n+ return self.is_synchronized_device()\n+\n+ def resolves_data_dependency(self):\n+ return self.is_synchronized_device()\n+\n+ def handles_memory_backpressure(self):\n+ return self.is_synchronized_device()\n+\n # Device APIs\n def device_name(self, device_index=None):\n if device_index is None:\n", "issue": "[BUG] TypeError: Can't instantiate abstract class MPS_Accelerator with abstract methods\n**Describe the bug**\r\nTypeError: Can't instantiate abstract class MPS_Accelerator with abstract methods handles_memory_backpressure, resolves_data_dependency, use_host_timers\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. git clone https://github.com/OpenBMB/MiniCPM.git\r\n2. follow setup step.\r\n3. run `!bash lora_finetune.sh` via `lora_finetune.ipynb`\r\n\r\n**Expected behavior**\r\nrunnable\r\n\r\n**ds_report output**\r\nPlease run `ds_report` to give us details about your setup.\r\n\r\n**Screenshots**\r\n```\r\n class CudaEventTimer(object):\r\n File \"/Users/katopz/git/MiniCPM/finetune/.venv/lib/python3.11/site-packages/deepspeed/utils/timer.py\", line 33, in CudaEventTimer\r\n def __init__(self, start_event: get_accelerator().Event, end_event: get_accelerator().Event):\r\n ^^^^^^^^^^^^^^^^^\r\n File \"/Users/katopz/git/MiniCPM/finetune/.venv/lib/python3.11/site-packages/deepspeed/accelerator/real_accelerator.py\", line 184, in get_accelerator\r\n ds_accelerator = MPS_Accelerator()\r\n ^^^^^^^^^^^^^^^^^\r\nTypeError: Can't instantiate abstract class MPS_Accelerator with abstract methods handles_memory_backpressure, resolves_data_dependency, use_host_timers\r\n```\r\n\r\n**System info (please complete the following information):**\r\n - OS: macOS 14.2.1 (23C71)\r\n - metal\r\n - Python 3.11.7\r\n\r\n**Launcher context**\r\ndeepspeed\r\n\r\n**Docker context**\r\nno\r\n\r\n**Additional context**\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nimport torch\n\nfrom .abstract_accelerator import DeepSpeedAccelerator\n\n# During setup stage torch may not be installed, pass on no torch will\n# allow op builder related API to be executed.\ntry:\n import torch.mps\nexcept ImportError:\n pass\n\n\nclass MPS_Accelerator(DeepSpeedAccelerator):\n\n def __init__(self):\n self._name = \"mps\"\n self._communication_backend_name = None\n\n def is_synchronized_device(self):\n return False\n\n # Device APIs\n def device_name(self, device_index=None):\n if device_index is None:\n return \"mps\"\n return \"mps:{}\".format(device_index)\n\n def device(self, device_index):\n return torch.device(\"mps\", index=0)\n\n def set_device(self, device_index):\n return\n\n def current_device(self):\n return torch.device(\"mps\", index=0)\n\n def current_device_name(self):\n return \"mps:0\"\n\n def device_count(self):\n return 1\n\n def synchronize(self, device_index=None):\n return torch.mps.synchronize()\n\n # RNG APIs\n def random(self):\n return torch.random\n\n def set_rng_state(self, new_state, device_index=None):\n return torch.mps.set_rng_state(new_state)\n\n def get_rng_state(self, device_index=None):\n return torch.mps.get_rng_state()\n\n def manual_seed(self, seed):\n return torch.mps.manual_seed(seed)\n\n def manual_seed_all(self, 
seed):\n return torch.mps.manual_seed(seed)\n\n def seed(self):\n return torch.mps.seed()\n\n def initial_seed(self, seed):\n return\n\n def default_generator(self, device_index):\n return\n\n # Streams/Events\n @property\n def Stream(self):\n return None\n\n def stream(self, stream):\n return None\n\n def current_stream(self, device_index=None):\n return None\n\n def default_stream(self, device_index=None):\n return None\n\n @property\n def Event(self):\n return None\n\n # Memory management\n def empty_cache(self):\n return torch.mps.empty_cache()\n\n def memory_allocated(self, device_index=None):\n return torch.mps.current_allocated_memory()\n\n def max_memory_allocated(self, device_index=None):\n return torch.mps.driver_allocated_memory()\n\n def set_per_process_memory_fraction(self, fraction):\n return torch.mps.set_per_process_memory_fraction(fraction)\n\n def reset_max_memory_allocated(self, device_index=None):\n return\n\n def memory_cached(self, device_index=None):\n return\n\n def max_memory_cached(self, device_index=None):\n return\n\n def reset_max_memory_cached(self, device_index=None):\n return\n\n def memory_stats(self, device_index=None):\n return\n\n def reset_peak_memory_stats(self, device_index=None):\n return\n\n def memory_reserved(self, device_index=None):\n return\n\n def max_memory_reserved(self, device_index=None):\n return\n\n def total_memory(self, device_index=None):\n return\n\n def available_memory(self, device_index=None):\n return\n\n # Data types\n def is_bf16_supported(self):\n return False\n\n def is_fp16_supported(self):\n return False\n\n def supported_dtypes(self):\n return [torch.float]\n\n # Misc\n def amp(self):\n return\n\n def is_available(self):\n return hasattr(torch.backends, \"mps\") and torch.backends.mps.is_available()\n\n def range_push(self, msg):\n return\n\n def range_pop(self):\n return\n\n def lazy_call(self, callback):\n return\n\n def communication_backend_name(self):\n return self._communication_backend_name\n\n def is_triton_supported(self):\n return False\n\n # Graph operations\n def create_graph(self):\n return None\n\n def capture_to_graph(self, graph, pool=None, stream=None):\n from deepspeed.runtime.utils import noop_context\n return noop_context()\n\n def replay_graph(self, graph):\n return\n\n # Tensor operations\n @property\n def BFloat16Tensor(self):\n return\n\n @property\n def ByteTensor(self):\n return\n\n @property\n def DoubleTensor(self):\n return\n\n @property\n def FloatTensor(self):\n return\n\n @property\n def HalfTensor(self):\n return\n\n @property\n def IntTensor(self):\n return\n\n @property\n def LongTensor(self):\n return\n\n def pin_memory(self, tensor, align_bytes=1):\n return tensor.pin_memory()\n\n def is_pinned(self, tensor):\n return tensor.is_pinned()\n\n def on_accelerator(self, tensor):\n device_str = str(tensor.device)\n if device_str.startswith(\"mps\"):\n return True\n else:\n return False\n\n def op_builder_dir(self):\n try:\n # is op_builder from deepspeed or a 3p version? 
this should only succeed if it's deepspeed\n # if successful this also means we're doing a local install and not JIT compile path\n from op_builder import __deepspeed__ # noqa: F401 # type: ignore\n\n return \"op_builder\"\n except ImportError:\n return \"deepspeed.ops.op_builder\"\n\n # create an instance of op builder, specified by class_name\n def create_op_builder(self, op_name):\n builder_class = self.get_op_builder(op_name)\n if builder_class is not None:\n return builder_class()\n return None\n\n # return an op builder class, specified by class_name\n def get_op_builder(self, class_name):\n from deepspeed.ops.op_builder.cpu import NotImplementedBuilder\n\n return NotImplementedBuilder\n\n def build_extension(self):\n from torch.utils.cpp_extension import BuildExtension\n\n return BuildExtension\n\n def export_envs(self):\n return []\n", "path": "accelerator/mps_accelerator.py"}]}
| 2,910 | 147 |
gh_patches_debug_15241
|
rasdani/github-patches
|
git_diff
|
numpy__numpy-14912
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Numpy FFTs return data types don't match input data types
<!-- Please describe the issue in detail here, and fill in the fields below -->
Unlike other functions, the NumPy FFT functions do not return arrays whose data types match the input arrays. Instead, `complex64` is promoted to `complex128`.
### Reproducing code example:
<!-- A short code example that reproduces the problem/missing feature. It should be
self-contained, i.e., possible to run as-is via 'python myproblem.py' -->
```python
import sys
import numpy as np
print(np.__version__, sys.version)
input = np.ones((5, 5), dtype='complex64')
output = np.square(input)
assert output.dtype == 'complex64', f"{output.dtype} square" # assert passes
output = np.fft.fft2(input)
assert output.dtype == 'complex64', f"{output.dtype} fft" # assert fails
```
### Numpy/Python version information:
<!-- Output from 'import sys, numpy; print(numpy.__version__, sys.version)' -->
1.17.3 3.8.0 | packaged by conda-forge | (default, Nov 6 2019, 23:20:36)
[GCC 7.3.0]
</issue>
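Until the promotion is addressed in the library itself, the dtype can be restored at the call site. A small illustrative wrapper (the `fft2_preserve_dtype` name is invented here). Note that this only restores the result's dtype; the transform may still be computed in double precision internally, so it does not recover single-precision speed or memory:

```python
import numpy as np

def fft2_preserve_dtype(a, *args, **kwargs):
    """Run np.fft.fft2, then cast the result back to the input's complex dtype."""
    result = np.fft.fft2(a, *args, **kwargs)
    if np.iscomplexobj(a) and result.dtype != a.dtype:
        result = result.astype(a.dtype, copy=False)
    return result

x = np.ones((5, 5), dtype="complex64")
print(fft2_preserve_dtype(x).dtype)  # complex64
```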
<code>
[start of numpy/fft/__init__.py]
1 """
2 Discrete Fourier Transform (:mod:`numpy.fft`)
3 =============================================
4
5 .. currentmodule:: numpy.fft
6
7 Standard FFTs
8 -------------
9
10 .. autosummary::
11 :toctree: generated/
12
13 fft Discrete Fourier transform.
14 ifft Inverse discrete Fourier transform.
15 fft2 Discrete Fourier transform in two dimensions.
16 ifft2 Inverse discrete Fourier transform in two dimensions.
17 fftn Discrete Fourier transform in N-dimensions.
18 ifftn Inverse discrete Fourier transform in N dimensions.
19
20 Real FFTs
21 ---------
22
23 .. autosummary::
24 :toctree: generated/
25
26 rfft Real discrete Fourier transform.
27 irfft Inverse real discrete Fourier transform.
28 rfft2 Real discrete Fourier transform in two dimensions.
29 irfft2 Inverse real discrete Fourier transform in two dimensions.
30 rfftn Real discrete Fourier transform in N dimensions.
31 irfftn Inverse real discrete Fourier transform in N dimensions.
32
33 Hermitian FFTs
34 --------------
35
36 .. autosummary::
37 :toctree: generated/
38
39 hfft Hermitian discrete Fourier transform.
40 ihfft Inverse Hermitian discrete Fourier transform.
41
42 Helper routines
43 ---------------
44
45 .. autosummary::
46 :toctree: generated/
47
48 fftfreq Discrete Fourier Transform sample frequencies.
49 rfftfreq DFT sample frequencies (for usage with rfft, irfft).
50 fftshift Shift zero-frequency component to center of spectrum.
51 ifftshift Inverse of fftshift.
52
53
54 Background information
55 ----------------------
56
57 Fourier analysis is fundamentally a method for expressing a function as a
58 sum of periodic components, and for recovering the function from those
59 components. When both the function and its Fourier transform are
60 replaced with discretized counterparts, it is called the discrete Fourier
61 transform (DFT). The DFT has become a mainstay of numerical computing in
62 part because of a very fast algorithm for computing it, called the Fast
63 Fourier Transform (FFT), which was known to Gauss (1805) and was brought
64 to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_
65 provide an accessible introduction to Fourier analysis and its
66 applications.
67
68 Because the discrete Fourier transform separates its input into
69 components that contribute at discrete frequencies, it has a great number
70 of applications in digital signal processing, e.g., for filtering, and in
71 this context the discretized input to the transform is customarily
72 referred to as a *signal*, which exists in the *time domain*. The output
73 is called a *spectrum* or *transform* and exists in the *frequency
74 domain*.
75
76 Implementation details
77 ----------------------
78
79 There are many ways to define the DFT, varying in the sign of the
80 exponent, normalization, etc. In this implementation, the DFT is defined
81 as
82
83 .. math::
84 A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\}
85 \\qquad k = 0,\\ldots,n-1.
86
87 The DFT is in general defined for complex inputs and outputs, and a
88 single-frequency component at linear frequency :math:`f` is
89 represented by a complex exponential
90 :math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t`
91 is the sampling interval.
92
93 The values in the result follow so-called "standard" order: If ``A =
94 fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of
95 the signal), which is always purely real for real inputs. Then ``A[1:n/2]``
96 contains the positive-frequency terms, and ``A[n/2+1:]`` contains the
97 negative-frequency terms, in order of decreasingly negative frequency.
98 For an even number of input points, ``A[n/2]`` represents both positive and
99 negative Nyquist frequency, and is also purely real for real input. For
100 an odd number of input points, ``A[(n-1)/2]`` contains the largest positive
101 frequency, while ``A[(n+1)/2]`` contains the largest negative frequency.
102 The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies
103 of corresponding elements in the output. The routine
104 ``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the
105 zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes
106 that shift.
107
108 When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)``
109 is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum.
110 The phase spectrum is obtained by ``np.angle(A)``.
111
112 The inverse DFT is defined as
113
114 .. math::
115 a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\}
116 \\qquad m = 0,\\ldots,n-1.
117
118 It differs from the forward transform by the sign of the exponential
119 argument and the default normalization by :math:`1/n`.
120
121 Normalization
122 -------------
123 The default normalization has the direct transforms unscaled and the inverse
124 transforms are scaled by :math:`1/n`. It is possible to obtain unitary
125 transforms by setting the keyword argument ``norm`` to ``"ortho"`` (default is
126 `None`) so that both direct and inverse transforms will be scaled by
127 :math:`1/\\sqrt{n}`.
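
For example, ``norm="ortho"`` gives a unitary transform: the round trip is
still exact and signal energy is preserved (Parseval's theorem)::

    import numpy as np

    x = np.random.rand(32)
    X = np.fft.fft(x, norm="ortho")
    np.allclose(np.fft.ifft(X, norm="ortho"), x)              # True
    np.allclose(np.sum(np.abs(x)**2), np.sum(np.abs(X)**2))   # True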
128
129 Real and Hermitian transforms
130 -----------------------------
131
132 When the input is purely real, its transform is Hermitian, i.e., the
133 component at frequency :math:`f_k` is the complex conjugate of the
134 component at frequency :math:`-f_k`, which means that for real
135 inputs there is no information in the negative frequency components that
136 is not already available from the positive frequency components.
137 The family of `rfft` functions is
138 designed to operate on real inputs, and exploits this symmetry by
139 computing only the positive frequency components, up to and including the
140 Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex
141 output points. The inverses of this family assume the same symmetry of
142 its input, and for an output of ``n`` points use ``n/2+1`` input points.
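
For example, with real input::

    import numpy as np

    x = np.random.rand(10)
    X = np.fft.rfft(x)                      # 10 real points -> 6 complex points (n/2 + 1)
    np.allclose(np.fft.irfft(X, n=10), x)   # True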
143
144 Correspondingly, when the spectrum is purely real, the signal is
145 Hermitian. The `hfft` family of functions exploits this symmetry by
146 using ``n/2+1`` complex points in the input (time) domain for ``n`` real
147 points in the frequency domain.
148
149 In higher dimensions, FFTs are used, e.g., for image analysis and
150 filtering. The computational efficiency of the FFT means that it can
151 also be a faster way to compute large convolutions, using the property
152 that a convolution in the time domain is equivalent to a point-by-point
153 multiplication in the frequency domain.
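
For instance, a circular convolution computed via the FFT (a sketch; zero-pad
both inputs first if a linear convolution is wanted)::

    import numpy as np

    x = np.random.rand(64)
    h = np.random.rand(64)
    conv = np.fft.ifft(np.fft.fft(x) * np.fft.fft(h)).real   # circular convolution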
154
155 Higher dimensions
156 -----------------
157
158 In two dimensions, the DFT is defined as
159
160 .. math::
161 A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1}
162 a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\}
163 \\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1,
164
165 which extends in the obvious way to higher dimensions, and the inverses
166 in higher dimensions also extend in the same way.
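
For example, in two dimensions::

    import numpy as np

    img = np.random.rand(32, 32)
    F = np.fft.fft2(img)              # 2-D DFT
    F_centered = np.fft.fftshift(F)   # zero frequency at the centre of the array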
167
168 References
169 ----------
170
171 .. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
172 machine calculation of complex Fourier series," *Math. Comput.*
173 19: 297-301.
174
175 .. [NR] Press, W., Teukolsky, S., Vetterline, W.T., and Flannery, B.P.,
176 2007, *Numerical Recipes: The Art of Scientific Computing*, ch.
177 12-13. Cambridge Univ. Press, Cambridge, UK.
178
179 Examples
180 --------
181
182 For examples, see the various functions.
183
184 """
185
186 from __future__ import division, absolute_import, print_function
187
188 from ._pocketfft import *
189 from .helper import *
190
191 from numpy._pytesttester import PytestTester
192 test = PytestTester(__name__)
193 del PytestTester
194
[end of numpy/fft/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/numpy/fft/__init__.py b/numpy/fft/__init__.py
--- a/numpy/fft/__init__.py
+++ b/numpy/fft/__init__.py
@@ -118,8 +118,16 @@
It differs from the forward transform by the sign of the exponential
argument and the default normalization by :math:`1/n`.
+Type Promotion
+--------------
+
+`numpy.fft` promotes ``float32`` and ``complex64`` arrays to ``float64`` and
+``complex128`` arrays respectively. For an FFT implementation that does not
+promote input arrays, see `scipy.fftpack`.
+
Normalization
-------------
+
The default normalization has the direct transforms unscaled and the inverse
transforms are scaled by :math:`1/n`. It is possible to obtain unitary
transforms by setting the keyword argument ``norm`` to ``"ortho"`` (default is
|
{"golden_diff": "diff --git a/numpy/fft/__init__.py b/numpy/fft/__init__.py\n--- a/numpy/fft/__init__.py\n+++ b/numpy/fft/__init__.py\n@@ -118,8 +118,16 @@\n It differs from the forward transform by the sign of the exponential\n argument and the default normalization by :math:`1/n`.\n \n+Type Promotion\n+--------------\n+\n+`numpy.fft` promotes ``float32`` and ``complex64`` arrays to ``float64`` and\n+``complex128`` arrays respectively. For an FFT implementation that does not\n+promote input arrays, see `scipy.fftpack`.\n+\n Normalization\n -------------\n+\n The default normalization has the direct transforms unscaled and the inverse\n transforms are scaled by :math:`1/n`. It is possible to obtain unitary\n transforms by setting the keyword argument ``norm`` to ``\"ortho\"`` (default is\n", "issue": "Numpy FFTs return data types don't match input data types\n<!-- Please describe the issue in detail here, and fill in the fields below -->\r\n\r\nUnlike other functions, the NumPy FFT functions do not return arrays whose data types match the input arrays. Instead, `complex64` is promoted to `complex128`.\r\n\r\n### Reproducing code example:\r\n\r\n<!-- A short code example that reproduces the problem/missing feature. It should be\r\nself-contained, i.e., possible to run as-is via 'python myproblem.py' -->\r\n\r\n```python\r\nimport sys\r\nimport numpy as np\r\n\r\nprint(np.__version__, sys.version)\r\n\r\ninput = np.ones((5, 5), dtype='complex64')\r\n\r\noutput = np.square(input)\r\nassert output.dtype == 'complex64', f\"{output.dtype} square\" # assert passes\r\n\r\noutput = np.fft.fft2(input)\r\nassert output.dtype == 'complex64', f\"{output.dtype} fft\" # assert fails\r\n```\r\n\r\n### Numpy/Python version information:\r\n\r\n<!-- Output from 'import sys, numpy; print(numpy.__version__, sys.version)' -->\r\n1.17.3 3.8.0 | packaged by conda-forge | (default, Nov 6 2019, 23:20:36) \r\n[GCC 7.3.0]\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nDiscrete Fourier Transform (:mod:`numpy.fft`)\n=============================================\n\n.. currentmodule:: numpy.fft\n\nStandard FFTs\n-------------\n\n.. autosummary::\n :toctree: generated/\n\n fft Discrete Fourier transform.\n ifft Inverse discrete Fourier transform.\n fft2 Discrete Fourier transform in two dimensions.\n ifft2 Inverse discrete Fourier transform in two dimensions.\n fftn Discrete Fourier transform in N-dimensions.\n ifftn Inverse discrete Fourier transform in N dimensions.\n\nReal FFTs\n---------\n\n.. autosummary::\n :toctree: generated/\n\n rfft Real discrete Fourier transform.\n irfft Inverse real discrete Fourier transform.\n rfft2 Real discrete Fourier transform in two dimensions.\n irfft2 Inverse real discrete Fourier transform in two dimensions.\n rfftn Real discrete Fourier transform in N dimensions.\n irfftn Inverse real discrete Fourier transform in N dimensions.\n\nHermitian FFTs\n--------------\n\n.. autosummary::\n :toctree: generated/\n\n hfft Hermitian discrete Fourier transform.\n ihfft Inverse Hermitian discrete Fourier transform.\n\nHelper routines\n---------------\n\n.. 
autosummary::\n :toctree: generated/\n\n fftfreq Discrete Fourier Transform sample frequencies.\n rfftfreq DFT sample frequencies (for usage with rfft, irfft).\n fftshift Shift zero-frequency component to center of spectrum.\n ifftshift Inverse of fftshift.\n\n\nBackground information\n----------------------\n\nFourier analysis is fundamentally a method for expressing a function as a\nsum of periodic components, and for recovering the function from those\ncomponents. When both the function and its Fourier transform are\nreplaced with discretized counterparts, it is called the discrete Fourier\ntransform (DFT). The DFT has become a mainstay of numerical computing in\npart because of a very fast algorithm for computing it, called the Fast\nFourier Transform (FFT), which was known to Gauss (1805) and was brought\nto light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_\nprovide an accessible introduction to Fourier analysis and its\napplications.\n\nBecause the discrete Fourier transform separates its input into\ncomponents that contribute at discrete frequencies, it has a great number\nof applications in digital signal processing, e.g., for filtering, and in\nthis context the discretized input to the transform is customarily\nreferred to as a *signal*, which exists in the *time domain*. The output\nis called a *spectrum* or *transform* and exists in the *frequency\ndomain*.\n\nImplementation details\n----------------------\n\nThere are many ways to define the DFT, varying in the sign of the\nexponent, normalization, etc. In this implementation, the DFT is defined\nas\n\n.. math::\n A_k = \\\\sum_{m=0}^{n-1} a_m \\\\exp\\\\left\\\\{-2\\\\pi i{mk \\\\over n}\\\\right\\\\}\n \\\\qquad k = 0,\\\\ldots,n-1.\n\nThe DFT is in general defined for complex inputs and outputs, and a\nsingle-frequency component at linear frequency :math:`f` is\nrepresented by a complex exponential\n:math:`a_m = \\\\exp\\\\{2\\\\pi i\\\\,f m\\\\Delta t\\\\}`, where :math:`\\\\Delta t`\nis the sampling interval.\n\nThe values in the result follow so-called \"standard\" order: If ``A =\nfft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of\nthe signal), which is always purely real for real inputs. Then ``A[1:n/2]``\ncontains the positive-frequency terms, and ``A[n/2+1:]`` contains the\nnegative-frequency terms, in order of decreasingly negative frequency.\nFor an even number of input points, ``A[n/2]`` represents both positive and\nnegative Nyquist frequency, and is also purely real for real input. For\nan odd number of input points, ``A[(n-1)/2]`` contains the largest positive\nfrequency, while ``A[(n+1)/2]`` contains the largest negative frequency.\nThe routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies\nof corresponding elements in the output. The routine\n``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the\nzero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes\nthat shift.\n\nWhen the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)``\nis its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum.\nThe phase spectrum is obtained by ``np.angle(A)``.\n\nThe inverse DFT is defined as\n\n.. 
math::\n a_m = \\\\frac{1}{n}\\\\sum_{k=0}^{n-1}A_k\\\\exp\\\\left\\\\{2\\\\pi i{mk\\\\over n}\\\\right\\\\}\n \\\\qquad m = 0,\\\\ldots,n-1.\n\nIt differs from the forward transform by the sign of the exponential\nargument and the default normalization by :math:`1/n`.\n\nNormalization\n-------------\nThe default normalization has the direct transforms unscaled and the inverse\ntransforms are scaled by :math:`1/n`. It is possible to obtain unitary\ntransforms by setting the keyword argument ``norm`` to ``\"ortho\"`` (default is\n`None`) so that both direct and inverse transforms will be scaled by\n:math:`1/\\\\sqrt{n}`.\n\nReal and Hermitian transforms\n-----------------------------\n\nWhen the input is purely real, its transform is Hermitian, i.e., the\ncomponent at frequency :math:`f_k` is the complex conjugate of the\ncomponent at frequency :math:`-f_k`, which means that for real\ninputs there is no information in the negative frequency components that\nis not already available from the positive frequency components.\nThe family of `rfft` functions is\ndesigned to operate on real inputs, and exploits this symmetry by\ncomputing only the positive frequency components, up to and including the\nNyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex\noutput points. The inverses of this family assumes the same symmetry of\nits input, and for an output of ``n`` points uses ``n/2+1`` input points.\n\nCorrespondingly, when the spectrum is purely real, the signal is\nHermitian. The `hfft` family of functions exploits this symmetry by\nusing ``n/2+1`` complex points in the input (time) domain for ``n`` real\npoints in the frequency domain.\n\nIn higher dimensions, FFTs are used, e.g., for image analysis and\nfiltering. The computational efficiency of the FFT means that it can\nalso be a faster way to compute large convolutions, using the property\nthat a convolution in the time domain is equivalent to a point-by-point\nmultiplication in the frequency domain.\n\nHigher dimensions\n-----------------\n\nIn two dimensions, the DFT is defined as\n\n.. math::\n A_{kl} = \\\\sum_{m=0}^{M-1} \\\\sum_{n=0}^{N-1}\n a_{mn}\\\\exp\\\\left\\\\{-2\\\\pi i \\\\left({mk\\\\over M}+{nl\\\\over N}\\\\right)\\\\right\\\\}\n \\\\qquad k = 0, \\\\ldots, M-1;\\\\quad l = 0, \\\\ldots, N-1,\n\nwhich extends in the obvious way to higher dimensions, and the inverses\nin higher dimensions also extend in the same way.\n\nReferences\n----------\n\n.. [CT] Cooley, James W., and John W. Tukey, 1965, \"An algorithm for the\n machine calculation of complex Fourier series,\" *Math. Comput.*\n 19: 297-301.\n\n.. [NR] Press, W., Teukolsky, S., Vetterline, W.T., and Flannery, B.P.,\n 2007, *Numerical Recipes: The Art of Scientific Computing*, ch.\n 12-13. Cambridge Univ. Press, Cambridge, UK.\n\nExamples\n--------\n\nFor examples, see the various functions.\n\n\"\"\"\n\nfrom __future__ import division, absolute_import, print_function\n\nfrom ._pocketfft import *\nfrom .helper import *\n\nfrom numpy._pytesttester import PytestTester\ntest = PytestTester(__name__)\ndel PytestTester\n", "path": "numpy/fft/__init__.py"}]}
| 3,181 | 207 |
gh_patches_debug_35417
|
rasdani/github-patches
|
git_diff
|
holoviz__panel-2418
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
unexpected Interactivity of "layered" plotly objects in Tabs
My first one of these so apologies if it's missing info / already flagged.
#### Software info:
python: 3.6.9
Panel = 0.6.2
plotly = 4.2.1
bokeh = 1.3.4
jupyter notebook server = 6.0.0
browser: Google chrome (and same behavior in embedded html output)
OS: Windows 8.1
#### Description of expected behavior and the observed behavior
The behavior below applies within the Jupyter notebook, in a browser window (.show()), and in HTML output (.save(embed = True)).
__Expected behavior:__ plotly objects within panel tabs should have the same interactivity as when not in tabs.
__Observed behavior:__ Only the plotly object at the "bottom level" (the last in the list of tabs, the "fig2" tab in the example) retains full interactivity (pan, zoom, select, legend trace select, etc.). All other tab "levels" (tabs other than the last one in the tab list, the "fig1" tab in the example) retain only legend-select interactivity. Interactions with the area bound by the axes (where a crosshair is seen) in "fig1" result in changes to the "fig2" plotly object.
#### Complete, minimal, self-contained example code that reproduces the issue
```
import plotly.graph_objs as go
import panel as pn
import numpy as np
pn.extension('plotly')
x = np.linspace(0,10,100)
y = np.sin(x)
y2 = np.cos(x)
data1 = [go.Scatter(x = x, y = y, name = 'scatter sin(x)', mode="markers+lines"),
go.Scatter(x = x, y = y2, name = 'scatter cos(x)', mode="markers+lines")]
data2 = [go.Bar(x = x, y = y, name = 'bar sin(x)'),
go.Bar(x = x, y = y2, name = 'bar cos(x)')]
fig1 = go.Figure(data = data1)
fig2 = go.Figure(data = data2)
pn.Column('## App with plotly objects in tabs:',
'Loss of interactivity on "fig1" tab plotly object',
pn.Tabs(('fig1', pn.Pane(fig1)),
('fig2', pn.Pane(fig2)))
)
```
#### Screenshots of issue

</issue>
<code>
[start of panel/pane/plotly.py]
1 """
2 Defines a PlotlyPane which renders a plotly plot using PlotlyPlot
3 bokeh model.
4 """
5 import numpy as np
6 import param
7
8 from bokeh.models import ColumnDataSource
9 from pyviz_comms import JupyterComm
10
11 from .base import PaneBase
12 from ..util import isdatetime, lazy_load
13 from ..viewable import Layoutable
14
15
16
17 class Plotly(PaneBase):
18 """
19 Plotly panes allow rendering plotly Figures and traces.
20
21 For efficiency any array objects found inside a Figure are added
22 to a ColumnDataSource which allows using binary transport to sync
23 the figure on bokeh server and via Comms.
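
    A minimal usage sketch::

        import panel as pn
        import plotly.graph_objs as go

        pn.extension('plotly')
        fig = go.Figure(data=[go.Scatter(x=[1, 2, 3], y=[2, 1, 3])])
        plotly_pane = pn.pane.Plotly(fig)   # figure arrays are synced via ColumnDataSources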
24 """
25
26 click_data = param.Dict(doc="Click callback data")
27
28 clickannotation_data = param.Dict(doc="Clickannotation callback data")
29
30 config = param.Dict(doc="Config data")
31
32 hover_data = param.Dict(doc="Hover callback data")
33
34 relayout_data = param.Dict(doc="Relayout callback data")
35
36 restyle_data = param.List(doc="Restyle callback data")
37
38 selected_data = param.Dict(doc="Selected callback data")
39
40 viewport = param.Dict(doc="Current viewport state")
41
42 viewport_update_policy = param.Selector(default="mouseup", doc="""
43 Policy by which the viewport parameter is updated during user interactions.
44
45 * "mouseup": updates are synchronized when mouse button is
46 released after panning
47 * "continuous": updates are synchronized continually while panning
48 * "throttle": updates are synchronized while panning, at
49 intervals determined by the viewport_update_throttle parameter
50 """, objects=["mouseup", "continuous", "throttle"])
51
52 viewport_update_throttle = param.Integer(default=200, bounds=(0, None), doc="""
53 Time interval in milliseconds at which viewport updates are
54 synchronized when viewport_update_policy is "throttle".""")
55
56 _render_count = param.Integer(default=0, doc="""
57 Number of renders, increment to trigger re-render""")
58
59 priority = 0.8
60
61 _updates = True
62
63 @classmethod
64 def applies(cls, obj):
65 return ((isinstance(obj, list) and obj and all(cls.applies(o) for o in obj)) or
66 hasattr(obj, 'to_plotly_json') or (isinstance(obj, dict)
67 and 'data' in obj and 'layout' in obj))
68
69 def __init__(self, object=None, **params):
70 super().__init__(object, **params)
71 self._figure = None
72 self._update_figure()
73
74 def _to_figure(self, obj):
75 import plotly.graph_objs as go
76 if isinstance(obj, go.Figure):
77 return obj
78 elif isinstance(obj, dict):
79 data, layout = obj['data'], obj['layout']
80 elif isinstance(obj, tuple):
81 data, layout = obj
82 else:
83 data, layout = obj, {}
84 data = data if isinstance(data, list) else [data]
85 return go.Figure(data=data, layout=layout)
86
87 @staticmethod
88 def _get_sources(json):
89 sources = []
90 traces = json.get('data', [])
91 for trace in traces:
92 data = {}
93 Plotly._get_sources_for_trace(trace, data)
94 sources.append(ColumnDataSource(data))
95 return sources
96
97 @staticmethod
98 def _get_sources_for_trace(json, data, parent_path=''):
99 for key, value in list(json.items()):
100 full_path = key if not parent_path else (parent_path + '.' + key)
101 if isinstance(value, np.ndarray):
102 # Extract numpy array
103 data[full_path] = [json.pop(key)]
104 elif isinstance(value, dict):
105 # Recurse into dictionaries:
106 Plotly._get_sources_for_trace(value, data=data, parent_path=full_path)
107 elif isinstance(value, list) and value and isinstance(value[0], dict):
108 # recurse into object arrays:
109 for i, element in enumerate(value):
110 element_path = full_path + '.' + str(i)
111 Plotly._get_sources_for_trace(
112 element, data=data, parent_path=element_path
113 )
114
115 @param.depends('object', watch=True)
116 def _update_figure(self):
117 import plotly.graph_objs as go
118
119 if (self.object is None or
120 type(self.object) is not go.Figure or
121 self.object is self._figure):
122 return
123
124 # Monkey patch the message stubs used by FigureWidget.
125 # We only patch `Figure` objects (not subclasses like FigureWidget) so
126 # we don't interfere with subclasses that override these methods.
127 fig = self.object
128 fig._send_addTraces_msg = lambda *_, **__: self.param.trigger('object')
129 fig._send_moveTraces_msg = lambda *_, **__: self.param.trigger('object')
130 fig._send_deleteTraces_msg = lambda *_, **__: self.param.trigger('object')
131 fig._send_restyle_msg = lambda *_, **__: self.param.trigger('object')
132 fig._send_relayout_msg = lambda *_, **__: self.param.trigger('object')
133 fig._send_update_msg = lambda *_, **__: self.param.trigger('object')
134 fig._send_animate_msg = lambda *_, **__: self.param.trigger('object')
135 self._figure = fig
136
137 def _update_data_sources(self, cds, trace):
138 trace_arrays = {}
139 Plotly._get_sources_for_trace(trace, trace_arrays)
140
141 update_sources = False
142 for key, new_col in trace_arrays.items():
143 new = new_col[0]
144
145 try:
146 old = cds.data.get(key)[0]
147 update_array = (
148 (type(old) != type(new)) or
149 (new.shape != old.shape) or
150 (new != old).any())
151 except Exception:
152 update_array = True
153
154 if update_array:
155 update_sources = True
156 cds.data[key] = [new]
157
158 return update_sources
159
160 @staticmethod
161 def _plotly_json_wrapper(fig):
162 """Wraps around to_plotly_json and applies necessary fixes.
163
164 For #382: Map datetime elements to strings.
165 """
166 json = fig.to_plotly_json()
167 data = json['data']
168
169 for idx in range(len(data)):
170 for key in data[idx]:
171 if isdatetime(data[idx][key]):
172 arr = data[idx][key]
173 if isinstance(arr, np.ndarray):
174 arr = arr.astype(str)
175 else:
176 arr = [str(v) for v in arr]
177 data[idx][key] = arr
178 return json
179
180 def _init_params(self):
181 viewport_params = [p for p in self.param if 'viewport' in p]
182 parameters = list(Layoutable.param)+viewport_params
183 params = {p: getattr(self, p) for p in parameters
184 if getattr(self, p) is not None}
185
186 if self.object is None:
187 json, sources = {}, []
188 else:
189 fig = self._to_figure(self.object)
190 json = self._plotly_json_wrapper(fig)
191 sources = Plotly._get_sources(json)
192
193 params['_render_count'] = self._render_count
194 params['config'] = self.config or {}
195 params['data'] = json.get('data', [])
196 params['data_sources'] = sources
197 params['layout'] = layout = json.get('layout', {})
198 if layout.get('autosize') and self.sizing_mode is self.param.sizing_mode.default:
199 params['sizing_mode'] = 'stretch_both'
200 return params
201
202 def _get_model(self, doc, root=None, parent=None, comm=None):
203 PlotlyPlot = lazy_load('panel.models.plotly', 'PlotlyPlot', isinstance(comm, JupyterComm))
204 model = PlotlyPlot(**self._init_params())
205 if root is None:
206 root = model
207 self._link_props(model, self._linkable_params, doc, root, comm)
208 self._models[root.ref['id']] = (model, parent)
209 return model
210
211 def _update(self, ref=None, model=None):
212 if self.object is None:
213 model.update(data=[], layout={})
214 model._render_count += 1
215 return
216
217 fig = self._to_figure(self.object)
218 json = self._plotly_json_wrapper(fig)
219 layout = json.get('layout')
220
221 traces = json['data']
222 new_sources = []
223 update_sources = False
224 for i, trace in enumerate(traces):
225 if i < len(model.data_sources):
226 cds = model.data_sources[i]
227 else:
228 cds = ColumnDataSource()
229 new_sources.append(cds)
230
231 update_sources = self._update_data_sources(cds, trace) or update_sources
232 try:
233 update_layout = model.layout != layout
234 except Exception:
235 update_layout = True
236
237 # Determine if model needs updates
238 if (len(model.data) != len(traces)):
239 update_data = True
240 else:
241 update_data = False
242 for new, old in zip(traces, model.data):
243 try:
244 update_data = (
245 {k: v for k, v in new.items() if k != 'uid'} !=
246 {k: v for k, v in old.items() if k != 'uid'}
247 )
248 except Exception:
249 update_data = True
250 if update_data:
251 break
252
253 updates = {}
254 if self.sizing_mode is self.param.sizing_mode.default and 'autosize' in layout:
255 autosize = layout.get('autosize')
256 if autosize and model.sizing_mode != 'stretch_both':
257 updates['sizing_mode'] = 'stretch_both'
258 elif not autosize and model.sizing_mode != 'fixed':
259 updates['sizing_mode'] = 'fixed'
260
261 if new_sources:
262 updates['data_sources'] = model.data_sources + new_sources
263
264 if update_data:
265 updates['data'] = json.get('data')
266
267 if update_layout:
268 updates['layout'] = layout
269
270 if updates:
271 model.update(**updates)
272
273 # Check if we should trigger rendering
274 if updates or update_sources:
275 model._render_count += 1
276
[end of panel/pane/plotly.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/panel/pane/plotly.py b/panel/pane/plotly.py
--- a/panel/pane/plotly.py
+++ b/panel/pane/plotly.py
@@ -5,12 +5,12 @@
import numpy as np
import param
-from bokeh.models import ColumnDataSource
+from bokeh.models import ColumnDataSource, CustomJS, Tabs
from pyviz_comms import JupyterComm
from .base import PaneBase
from ..util import isdatetime, lazy_load
-from ..viewable import Layoutable
+from ..viewable import Layoutable, Viewable
@@ -206,6 +206,8 @@
root = model
self._link_props(model, self._linkable_params, doc, root, comm)
self._models[root.ref['id']] = (model, parent)
+ if _patch_tabs_plotly not in Viewable._preprocessing_hooks:
+ Viewable._preprocessing_hooks.append(_patch_tabs_plotly)
return model
def _update(self, ref=None, model=None):
@@ -273,3 +275,42 @@
# Check if we should trigger rendering
if updates or update_sources:
model._render_count += 1
+
+
+def _patch_tabs_plotly(viewable, root):
+ """
+ A preprocessing hook which ensures that any Plotly panes rendered
+ inside Tabs are only visible when the tab they are in is active.
+ This is a workaround for https://github.com/holoviz/panel/issues/804.
+ """
+ from ..models.plotly import PlotlyPlot
+
+ tabs_models = list(root.select({'type': Tabs}))
+ plotly_models = root.select({'type': PlotlyPlot})
+
+ for model in plotly_models:
+ parent_tabs = []
+ for tabs in tabs_models:
+ if tabs.select_one({'id': model.id}):
+ parent_tabs.append(tabs)
+ parent_tab = None
+ for tabs in parent_tabs:
+ if not any(tabs.select_one(pt) for pt in parent_tabs if pt is not tabs):
+ parent_tab = tabs
+ break
+ if parent_tab is None:
+ return
+ for i, tab in enumerate(parent_tab.tabs):
+ if tab.select_one({'id': model.id}):
+ break
+ updated = False
+ code = "model.visible = cb_obj.active == i;"
+ for cb in parent_tab.js_property_callbacks.get('change:active', []):
+ if cb.code == code and cb.args.get('model') is model:
+ cb.args['i'] = i
+ updated = True
+ if updated:
+ continue
+ callback = CustomJS(args={'model': model, 'i': i, 'margin': model.margin}, code=code)
+ parent_tab.js_on_change('active', callback)
+ model.visible = parent_tab.active == i
|
{"golden_diff": "diff --git a/panel/pane/plotly.py b/panel/pane/plotly.py\n--- a/panel/pane/plotly.py\n+++ b/panel/pane/plotly.py\n@@ -5,12 +5,12 @@\n import numpy as np\n import param\n \n-from bokeh.models import ColumnDataSource\n+from bokeh.models import ColumnDataSource, CustomJS, Tabs\n from pyviz_comms import JupyterComm\n \n from .base import PaneBase\n from ..util import isdatetime, lazy_load\n-from ..viewable import Layoutable\n+from ..viewable import Layoutable, Viewable\n \n \n \n@@ -206,6 +206,8 @@\n root = model\n self._link_props(model, self._linkable_params, doc, root, comm)\n self._models[root.ref['id']] = (model, parent)\n+ if _patch_tabs_plotly not in Viewable._preprocessing_hooks:\n+ Viewable._preprocessing_hooks.append(_patch_tabs_plotly)\n return model\n \n def _update(self, ref=None, model=None):\n@@ -273,3 +275,42 @@\n # Check if we should trigger rendering\n if updates or update_sources:\n model._render_count += 1\n+\n+\n+def _patch_tabs_plotly(viewable, root):\n+ \"\"\"\n+ A preprocessing hook which ensures that any Plotly panes rendered\n+ inside Tabs are only visible when the tab they are in is active.\n+ This is a workaround for https://github.com/holoviz/panel/issues/804.\n+ \"\"\"\n+ from ..models.plotly import PlotlyPlot\n+\n+ tabs_models = list(root.select({'type': Tabs}))\n+ plotly_models = root.select({'type': PlotlyPlot})\n+\n+ for model in plotly_models:\n+ parent_tabs = []\n+ for tabs in tabs_models:\n+ if tabs.select_one({'id': model.id}):\n+ parent_tabs.append(tabs)\n+ parent_tab = None\n+ for tabs in parent_tabs:\n+ if not any(tabs.select_one(pt) for pt in parent_tabs if pt is not tabs):\n+ parent_tab = tabs\n+ break\n+ if parent_tab is None:\n+ return\n+ for i, tab in enumerate(parent_tab.tabs):\n+ if tab.select_one({'id': model.id}):\n+ break\n+ updated = False\n+ code = \"model.visible = cb_obj.active == i;\"\n+ for cb in parent_tab.js_property_callbacks.get('change:active', []):\n+ if cb.code == code and cb.args.get('model') is model:\n+ cb.args['i'] = i\n+ updated = True\n+ if updated:\n+ continue\n+ callback = CustomJS(args={'model': model, 'i': i, 'margin': model.margin}, code=code)\n+ parent_tab.js_on_change('active', callback)\n+ model.visible = parent_tab.active == i\n", "issue": "unexpected Interactivity of \"layered\" plotly objects in Tabs\nMy first one of these so apologies if it's missing info / already flagged.\r\n\r\n#### Software info:\r\npython: 3.6.9\r\nPanel = 0.6.2\r\nplotly = 4.2.1\r\nbokeh = 1.3.4\r\njupyter notebook server = 6.0.0\r\nbrowser: Google chrome (and same behavior in embedded html output)\r\nOS: Windows 8.1\r\n\r\n#### Description of expected behavior and the observed behavior\r\nBelow applies to within jupyter notebook, in browser window (.show()) and in html output (.save(embed = True))\r\n\r\n__Expected behavior:__ plotly objects within panel tabs to have same interactivity as when not in tabs.\r\n\r\n__observed behavior:__ Only plotly object in \"bottom level\" (last in list of tabs, \"fig2\" tab in example) retains full interactivity (pan, zoom, select, legend trace select etc.)). All other tab \"levels\" (tabs other than the last one in tab list, \"fig1\" tab inexample) retain only legend select interactivity. 
Interactions with the area bound by the axis (where a crosshair is seen) in \"fig1\" results in changes to the \"fig2\" plotly object.\r\n\r\n\r\n#### Complete, minimal, self-contained example code that reproduces the issue\r\n\r\n```\r\nimport plotly.graph_objs as go\r\nimport panel as pn\r\nimport numpy as np\r\npn.extension('plotly')\r\n\r\nx = np.linspace(0,10,100)\r\ny = np.sin(x)\r\ny2 = np.cos(x)\r\n\r\ndata1 = [go.Scatter(x = x, y = y, name = 'scatter sin(x)', mode=\"markers+lines\"),\r\n go.Scatter(x = x, y = y2, name = 'scatter cos(x)', mode=\"markers+lines\")]\r\n\r\ndata2 = [go.Bar(x = x, y = y, name = 'bar sin(x)'),\r\n go.Bar(x = x, y = y2, name = 'bar cos(x)')]\r\n\r\nfig1 = go.Figure(data = data1)\r\nfig2 = go.Figure(data = data2)\r\n\r\npn.Column('## App with plotly objects in tabs:',\r\n 'Loss of interactivity on \"fig1\" tab plotly object',\r\n pn.Tabs(('fig1', pn.Pane(fig1)),\r\n ('fig2', pn.Pane(fig2)))\r\n )\r\n\r\n```\r\n#### Screenshots of issue\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nDefines a PlotlyPane which renders a plotly plot using PlotlyPlot\nbokeh model.\n\"\"\"\nimport numpy as np\nimport param\n\nfrom bokeh.models import ColumnDataSource\nfrom pyviz_comms import JupyterComm\n\nfrom .base import PaneBase\nfrom ..util import isdatetime, lazy_load\nfrom ..viewable import Layoutable\n\n\n\nclass Plotly(PaneBase):\n \"\"\"\n Plotly panes allow rendering plotly Figures and traces.\n\n For efficiency any array objects found inside a Figure are added\n to a ColumnDataSource which allows using binary transport to sync\n the figure on bokeh server and via Comms.\n \"\"\"\n\n click_data = param.Dict(doc=\"Click callback data\")\n\n clickannotation_data = param.Dict(doc=\"Clickannotation callback data\")\n\n config = param.Dict(doc=\"Config data\")\n\n hover_data = param.Dict(doc=\"Hover callback data\")\n\n relayout_data = param.Dict(doc=\"Relayout callback data\")\n\n restyle_data = param.List(doc=\"Restyle callback data\")\n\n selected_data = param.Dict(doc=\"Selected callback data\")\n\n viewport = param.Dict(doc=\"Current viewport state\")\n\n viewport_update_policy = param.Selector(default=\"mouseup\", doc=\"\"\"\n Policy by which the viewport parameter is updated during user interactions.\n\n * \"mouseup\": updates are synchronized when mouse button is\n released after panning\n * \"continuous\": updates are synchronized continually while panning\n * \"throttle\": updates are synchronized while panning, at \n intervals determined by the viewport_update_throttle parameter\n \"\"\", objects=[\"mouseup\", \"continuous\", \"throttle\"])\n\n viewport_update_throttle = param.Integer(default=200, bounds=(0, None), doc=\"\"\"\n Time interval in milliseconds at which viewport updates are\n synchronized when viewport_update_policy is \"throttle\".\"\"\")\n\n _render_count = param.Integer(default=0, doc=\"\"\"\n Number of renders, increment to trigger re-render\"\"\")\n\n priority = 0.8\n\n _updates = True\n\n @classmethod\n def applies(cls, obj):\n return ((isinstance(obj, list) and obj and all(cls.applies(o) for o in obj)) or\n hasattr(obj, 'to_plotly_json') or (isinstance(obj, dict)\n and 'data' in obj and 'layout' in obj))\n\n def __init__(self, object=None, **params):\n super().__init__(object, **params)\n self._figure = None\n self._update_figure()\n\n def _to_figure(self, obj):\n import plotly.graph_objs as go\n if isinstance(obj, go.Figure):\n return obj\n elif isinstance(obj, dict):\n data, layout = obj['data'], obj['layout']\n elif 
isinstance(obj, tuple):\n data, layout = obj\n else:\n data, layout = obj, {}\n data = data if isinstance(data, list) else [data]\n return go.Figure(data=data, layout=layout)\n\n @staticmethod\n def _get_sources(json):\n sources = []\n traces = json.get('data', [])\n for trace in traces:\n data = {}\n Plotly._get_sources_for_trace(trace, data)\n sources.append(ColumnDataSource(data))\n return sources\n\n @staticmethod\n def _get_sources_for_trace(json, data, parent_path=''):\n for key, value in list(json.items()):\n full_path = key if not parent_path else (parent_path + '.' + key)\n if isinstance(value, np.ndarray):\n # Extract numpy array\n data[full_path] = [json.pop(key)]\n elif isinstance(value, dict):\n # Recurse into dictionaries:\n Plotly._get_sources_for_trace(value, data=data, parent_path=full_path)\n elif isinstance(value, list) and value and isinstance(value[0], dict):\n # recurse into object arrays:\n for i, element in enumerate(value):\n element_path = full_path + '.' + str(i)\n Plotly._get_sources_for_trace(\n element, data=data, parent_path=element_path\n )\n\n @param.depends('object', watch=True)\n def _update_figure(self):\n import plotly.graph_objs as go\n\n if (self.object is None or\n type(self.object) is not go.Figure or\n self.object is self._figure):\n return\n\n # Monkey patch the message stubs used by FigureWidget.\n # We only patch `Figure` objects (not subclasses like FigureWidget) so\n # we don't interfere with subclasses that override these methods.\n fig = self.object\n fig._send_addTraces_msg = lambda *_, **__: self.param.trigger('object')\n fig._send_moveTraces_msg = lambda *_, **__: self.param.trigger('object')\n fig._send_deleteTraces_msg = lambda *_, **__: self.param.trigger('object')\n fig._send_restyle_msg = lambda *_, **__: self.param.trigger('object')\n fig._send_relayout_msg = lambda *_, **__: self.param.trigger('object')\n fig._send_update_msg = lambda *_, **__: self.param.trigger('object')\n fig._send_animate_msg = lambda *_, **__: self.param.trigger('object')\n self._figure = fig\n\n def _update_data_sources(self, cds, trace):\n trace_arrays = {}\n Plotly._get_sources_for_trace(trace, trace_arrays)\n\n update_sources = False\n for key, new_col in trace_arrays.items():\n new = new_col[0]\n\n try:\n old = cds.data.get(key)[0]\n update_array = (\n (type(old) != type(new)) or\n (new.shape != old.shape) or\n (new != old).any())\n except Exception:\n update_array = True\n\n if update_array:\n update_sources = True\n cds.data[key] = [new]\n\n return update_sources\n\n @staticmethod\n def _plotly_json_wrapper(fig):\n \"\"\"Wraps around to_plotly_json and applies necessary fixes.\n\n For #382: Map datetime elements to strings.\n \"\"\"\n json = fig.to_plotly_json()\n data = json['data']\n\n for idx in range(len(data)):\n for key in data[idx]:\n if isdatetime(data[idx][key]):\n arr = data[idx][key]\n if isinstance(arr, np.ndarray):\n arr = arr.astype(str) \n else:\n arr = [str(v) for v in arr]\n data[idx][key] = arr\n return json\n\n def _init_params(self):\n viewport_params = [p for p in self.param if 'viewport' in p]\n parameters = list(Layoutable.param)+viewport_params\n params = {p: getattr(self, p) for p in parameters\n if getattr(self, p) is not None}\n\n if self.object is None:\n json, sources = {}, []\n else:\n fig = self._to_figure(self.object)\n json = self._plotly_json_wrapper(fig)\n sources = Plotly._get_sources(json)\n\n params['_render_count'] = self._render_count\n params['config'] = self.config or {}\n params['data'] = json.get('data', [])\n 
params['data_sources'] = sources\n params['layout'] = layout = json.get('layout', {})\n if layout.get('autosize') and self.sizing_mode is self.param.sizing_mode.default:\n params['sizing_mode'] = 'stretch_both'\n return params\n\n def _get_model(self, doc, root=None, parent=None, comm=None):\n PlotlyPlot = lazy_load('panel.models.plotly', 'PlotlyPlot', isinstance(comm, JupyterComm))\n model = PlotlyPlot(**self._init_params())\n if root is None:\n root = model\n self._link_props(model, self._linkable_params, doc, root, comm)\n self._models[root.ref['id']] = (model, parent)\n return model\n\n def _update(self, ref=None, model=None):\n if self.object is None:\n model.update(data=[], layout={})\n model._render_count += 1\n return\n\n fig = self._to_figure(self.object)\n json = self._plotly_json_wrapper(fig)\n layout = json.get('layout')\n\n traces = json['data']\n new_sources = []\n update_sources = False\n for i, trace in enumerate(traces):\n if i < len(model.data_sources):\n cds = model.data_sources[i]\n else:\n cds = ColumnDataSource()\n new_sources.append(cds)\n\n update_sources = self._update_data_sources(cds, trace) or update_sources\n try:\n update_layout = model.layout != layout\n except Exception:\n update_layout = True\n\n # Determine if model needs updates\n if (len(model.data) != len(traces)):\n update_data = True\n else:\n update_data = False\n for new, old in zip(traces, model.data):\n try:\n update_data = (\n {k: v for k, v in new.items() if k != 'uid'} !=\n {k: v for k, v in old.items() if k != 'uid'}\n )\n except Exception:\n update_data = True\n if update_data:\n break\n\n updates = {}\n if self.sizing_mode is self.param.sizing_mode.default and 'autosize' in layout:\n autosize = layout.get('autosize')\n if autosize and model.sizing_mode != 'stretch_both':\n updates['sizing_mode'] = 'stretch_both'\n elif not autosize and model.sizing_mode != 'fixed':\n updates['sizing_mode'] = 'fixed'\n\n if new_sources:\n updates['data_sources'] = model.data_sources + new_sources\n\n if update_data:\n updates['data'] = json.get('data')\n\n if update_layout:\n updates['layout'] = layout\n\n if updates:\n model.update(**updates)\n\n # Check if we should trigger rendering\n if updates or update_sources:\n model._render_count += 1\n", "path": "panel/pane/plotly.py"}]}
| 4,046 | 654 |
gh_patches_debug_5450
|
rasdani/github-patches
|
git_diff
|
mozilla__bugbug-1713
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Set up Sentry for bugbug
Adding it will help investigate issues (like the Mercurial woes).
</issue>
<code>
[start of http_service/bugbug_http/worker.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 # This Source Code Form is subject to the terms of the Mozilla Public
4 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
5 # You can obtain one at http://mozilla.org/MPL/2.0/.
6
7 import os
8 import sys
9
10 from redis import Redis
11 from rq import Connection, Worker
12
13 import bugbug_http.boot
14
15
16 def main():
17 # Bootstrap the worker assets
18 bugbug_http.boot.boot_worker()
19
20 # Provide queue names to listen to as arguments to this script,
21 # similar to rq worker
22 redis_url = os.environ.get("REDIS_URL", "redis://localhost/0")
23 redis_conn = Redis.from_url(redis_url)
24 with Connection(connection=redis_conn):
25 qs = sys.argv[1:] or ["default"]
26
27 w = Worker(qs)
28 w.work()
29
30
31 if __name__ == "__main__":
32 main()
33
[end of http_service/bugbug_http/worker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/http_service/bugbug_http/worker.py b/http_service/bugbug_http/worker.py
--- a/http_service/bugbug_http/worker.py
+++ b/http_service/bugbug_http/worker.py
@@ -7,10 +7,20 @@
import os
import sys
+import sentry_sdk
from redis import Redis
from rq import Connection, Worker
+from sentry_sdk.integrations.rq import RqIntegration
import bugbug_http.boot
+from bugbug import get_bugbug_version
+
+if os.environ.get("SENTRY_DSN"):
+ sentry_sdk.init(
+ os.environ.get("SENTRY_DSN"),
+ integrations=[RqIntegration()],
+ release=get_bugbug_version(),
+ )
def main():
|
{"golden_diff": "diff --git a/http_service/bugbug_http/worker.py b/http_service/bugbug_http/worker.py\n--- a/http_service/bugbug_http/worker.py\n+++ b/http_service/bugbug_http/worker.py\n@@ -7,10 +7,20 @@\n import os\n import sys\n \n+import sentry_sdk\n from redis import Redis\n from rq import Connection, Worker\n+from sentry_sdk.integrations.rq import RqIntegration\n \n import bugbug_http.boot\n+from bugbug import get_bugbug_version\n+\n+if os.environ.get(\"SENTRY_DSN\"):\n+ sentry_sdk.init(\n+ os.environ.get(\"SENTRY_DSN\"),\n+ integrations=[RqIntegration()],\n+ release=get_bugbug_version(),\n+ )\n \n \n def main():\n", "issue": "Set up Sentry for bugbug\nAdding it will help investigate issues (like the Mercurial woes).\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport os\nimport sys\n\nfrom redis import Redis\nfrom rq import Connection, Worker\n\nimport bugbug_http.boot\n\n\ndef main():\n # Bootstrap the worker assets\n bugbug_http.boot.boot_worker()\n\n # Provide queue names to listen to as arguments to this script,\n # similar to rq worker\n redis_url = os.environ.get(\"REDIS_URL\", \"redis://localhost/0\")\n redis_conn = Redis.from_url(redis_url)\n with Connection(connection=redis_conn):\n qs = sys.argv[1:] or [\"default\"]\n\n w = Worker(qs)\n w.work()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "http_service/bugbug_http/worker.py"}]}
| 832 | 169 |
gh_patches_debug_58376
|
rasdani/github-patches
|
git_diff
|
deis__deis-323
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`deis run` generates ugly error if app code not yet pushed
I ran `deis run ls -la` after I had created the app, but before I had pushed the code with `git push deis master`. Here is the error I received:
```
ben$ example-python-flask > deis run ls -la
Warning: non-zero return code 255
lxc-start: No such file or directory - failed to mount '/opt/deis/runtime/slugs/hushed-sailfish-1/app' on '/usr/lib/lxc/root///app'
lxc-start: failed to setup the mount entries for '5f4f4d932501338fa2062d52e5893dfbd3933fa09102c67493a169a2a87ee479'
lxc-start: failed to setup the container
lxc-start: invalid sequence number 1. expected 2
lxc-start: failed to spawn '5f4f4d932501338fa2062d52e5893dfbd3933fa09102c67493a169a2a87ee479'
lxc-start: Device or resource busy - failed to remove cgroup '/sys/fs/cgroup/cpuset//lxc/5f4f4d932501338fa2062d52e5893dfbd3933fa09102c67493a169a2a87ee479'
```
</issue>
<code>
[start of api/tasks.py]
1
2 from __future__ import unicode_literals
3 import importlib
4
5 from celery import task
6 from celery.canvas import group
7
8 from deis import settings
9 from provider import import_provider_module
10
11 # import user-defined config management module
12 CM = importlib.import_module(settings.CM_MODULE)
13
14
15 @task
16 def build_layer(layer):
17 provider = import_provider_module(layer.flavor.provider.type)
18 provider.build_layer(layer.flat())
19
20
21 @task
22 def destroy_layer(layer):
23 provider = import_provider_module(layer.flavor.provider.type)
24 provider.destroy_layer(layer.flat())
25 layer.delete()
26
27
28 @task
29 def build_node(node):
30 provider = import_provider_module(node.layer.flavor.provider.type)
31 provider_id, fqdn, metadata = provider.build_node(node.flat())
32 node.provider_id = provider_id
33 node.fqdn = fqdn
34 node.metadata = metadata
35 node.save()
36 CM.bootstrap_node(node.flat())
37
38
39 @task
40 def destroy_node(node):
41 provider = import_provider_module(node.layer.flavor.provider.type)
42 provider.destroy_node(node.flat())
43 CM.purge_node(node.flat())
44 node.delete()
45
46
47 @task
48 def converge_node(node):
49 output, rc = CM.converge_node(node.flat())
50 return output, rc
51
52
53 @task
54 def run_node(node, command):
55 output, rc = CM.run_node(node.flat(), command)
56 return output, rc
57
58
59 @task
60 def build_formation(formation):
61 return
62
63
64 @task
65 def destroy_formation(formation):
66 app_tasks = [destroy_app.si(a) for a in formation.app_set.all()]
67 node_tasks = [destroy_node.si(n) for n in formation.node_set.all()]
68 layer_tasks = [destroy_layer.si(l) for l in formation.layer_set.all()]
69 group(app_tasks + node_tasks).apply_async().join()
70 group(layer_tasks).apply_async().join()
71 CM.purge_formation(formation.flat())
72 formation.delete()
73
74
75 @task
76 def converge_formation(formation):
77 nodes = formation.node_set.all()
78 subtasks = []
79 for n in nodes:
80 subtask = converge_node.si(n)
81 subtasks.append(subtask)
82 group(*subtasks).apply_async().join()
83
84
85 @task
86 def build_app(app):
87 return
88
89
90 @task
91 def destroy_app(app):
92 CM.purge_app(app.flat())
93 app.delete()
94 app.formation.publish()
95
96
97 @task
98 def converge_controller():
99 CM.converge_controller()
100 return None
101
[end of api/tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/api/tasks.py b/api/tasks.py
--- a/api/tasks.py
+++ b/api/tasks.py
@@ -53,6 +53,8 @@
@task
def run_node(node, command):
output, rc = CM.run_node(node.flat(), command)
+ if rc != 0 and 'failed to setup the container' in output:
+ output = '\033[35mPlease run `git push deis master` first.\033[0m\n' + output
return output, rc
|
{"golden_diff": "diff --git a/api/tasks.py b/api/tasks.py\n--- a/api/tasks.py\n+++ b/api/tasks.py\n@@ -53,6 +53,8 @@\n @task\n def run_node(node, command):\n output, rc = CM.run_node(node.flat(), command)\n+ if rc != 0 and 'failed to setup the container' in output:\n+ output = '\\033[35mPlease run `git push deis master` first.\\033[0m\\n' + output\n return output, rc\n", "issue": "`deis run` generates ugly error if app code not yet pushed\nI ran `deis run ls -la` after I had created the app, but before I had pushed the code with `git push deis master`. Here is the error I received:\n\n```\nben$ example-python-flask > deis run ls -la\nWarning: non-zero return code 255\nlxc-start: No such file or directory - failed to mount '/opt/deis/runtime/slugs/hushed-sailfish-1/app' on '/usr/lib/lxc/root///app'\nlxc-start: failed to setup the mount entries for '5f4f4d932501338fa2062d52e5893dfbd3933fa09102c67493a169a2a87ee479'\nlxc-start: failed to setup the container\nlxc-start: invalid sequence number 1. expected 2\nlxc-start: failed to spawn '5f4f4d932501338fa2062d52e5893dfbd3933fa09102c67493a169a2a87ee479'\nlxc-start: Device or resource busy - failed to remove cgroup '/sys/fs/cgroup/cpuset//lxc/5f4f4d932501338fa2062d52e5893dfbd3933fa09102c67493a169a2a87ee479'\n```\n\n", "before_files": [{"content": "\nfrom __future__ import unicode_literals\nimport importlib\n\nfrom celery import task\nfrom celery.canvas import group\n\nfrom deis import settings\nfrom provider import import_provider_module\n\n# import user-defined config management module\nCM = importlib.import_module(settings.CM_MODULE)\n\n\n@task\ndef build_layer(layer):\n provider = import_provider_module(layer.flavor.provider.type)\n provider.build_layer(layer.flat())\n\n\n@task\ndef destroy_layer(layer):\n provider = import_provider_module(layer.flavor.provider.type)\n provider.destroy_layer(layer.flat())\n layer.delete()\n\n\n@task\ndef build_node(node):\n provider = import_provider_module(node.layer.flavor.provider.type)\n provider_id, fqdn, metadata = provider.build_node(node.flat())\n node.provider_id = provider_id\n node.fqdn = fqdn\n node.metadata = metadata\n node.save()\n CM.bootstrap_node(node.flat())\n\n\n@task\ndef destroy_node(node):\n provider = import_provider_module(node.layer.flavor.provider.type)\n provider.destroy_node(node.flat())\n CM.purge_node(node.flat())\n node.delete()\n\n\n@task\ndef converge_node(node):\n output, rc = CM.converge_node(node.flat())\n return output, rc\n\n\n@task\ndef run_node(node, command):\n output, rc = CM.run_node(node.flat(), command)\n return output, rc\n\n\n@task\ndef build_formation(formation):\n return\n\n\n@task\ndef destroy_formation(formation):\n app_tasks = [destroy_app.si(a) for a in formation.app_set.all()]\n node_tasks = [destroy_node.si(n) for n in formation.node_set.all()]\n layer_tasks = [destroy_layer.si(l) for l in formation.layer_set.all()]\n group(app_tasks + node_tasks).apply_async().join()\n group(layer_tasks).apply_async().join()\n CM.purge_formation(formation.flat())\n formation.delete()\n\n\n@task\ndef converge_formation(formation):\n nodes = formation.node_set.all()\n subtasks = []\n for n in nodes:\n subtask = converge_node.si(n)\n subtasks.append(subtask)\n group(*subtasks).apply_async().join()\n\n\n@task\ndef build_app(app):\n return\n\n\n@task\ndef destroy_app(app):\n CM.purge_app(app.flat())\n app.delete()\n app.formation.publish()\n\n\n@task\ndef converge_controller():\n CM.converge_controller()\n return None\n", "path": "api/tasks.py"}]}
| 1,630 | 117 |
gh_patches_debug_14336
|
rasdani/github-patches
|
git_diff
|
DataBiosphere__toil-2077
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't install from tarball on GitHub
```
$ pip install https://github.com/BD2KGenomics/toil/archive/3.14.0.tar.gz
Collecting https://github.com/BD2KGenomics/toil/archive/3.14.0.tar.gz
Downloading https://github.com/BD2KGenomics/toil/archive/3.14.0.tar.gz
- 7.0MB 40.4MB/s
Complete output from command python setup.py egg_info:
fatal: Not a git repository (or any of the parent directories): .git
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/tmp/pip-l5WfLH-build/setup.py", line 152, in <module>
version = importVersion()
File "/tmp/pip-l5WfLH-build/setup.py", line 132, in importVersion
new = version_template.expand_()
File "version_template.py", line 131, in expand_
return ''.join("%s = %s\n" % (k, repr(resolve(k))) for k, v in variables.items())
File "version_template.py", line 131, in <genexpr>
return ''.join("%s = %s\n" % (k, repr(resolve(k))) for k, v in variables.items())
File "version_template.py", line 127, in resolve
v = v()
File "version_template.py", line 85, in dockerShortTag
return shortVersion()
File "version_template.py", line 46, in shortVersion
return _version(shorten=True)
File "version_template.py", line 51, in _version
currentCommit()[:7 if shorten else None],
File "version_template.py", line 111, in currentCommit
return check_output('git log --pretty=oneline -n 1 -- $(pwd)', shell=True).split()[0]
File "/usr/lib/python2.7/subprocess.py", line 223, in check_output
raise CalledProcessError(retcode, cmd, output=output)
subprocess.CalledProcessError: Command 'git log --pretty=oneline -n 1 -- $(pwd)' returned non-zero exit status 128
----------------------------------------
Command "python setup.py egg_info" failed with error code 1 in /tmp/pip-l5WfLH-build/
```
</issue>
<code>
[start of version_template.py]
1 # Copyright (C) 2015-2016 Regents of the University of California
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """This script is a template for src/toil/version.py. Running it without arguments echoes all
16 globals, i.e. module attributes. Constant assignments will be echoed verbatim while callables
17 will be invoked and their result echoed as an assignment using the function name as the left-hand
18 side and the return value of the function as right-hand side. To prevent a module attribute from
19 being echoed, start or end the attribute name with an underscore. To print the value of a single
20 symbol, pass the name of that attribute to the script as a command line argument. You can also
21 import the expand_ function and invoke it directly with either no or exactly one argument."""
22
23 # Note to maintainers:
24 #
25 # - don't import at module level unless you want the imported value to be included in the output
26 # - only import from the Python standard run-time library (you can't have any dependencies)
27
28 baseVersion = '3.15.0a1'
29
30 cgcloudVersion = '1.6.0a1.dev393'
31
32
33 def version():
34 """
35 A version identifier that includes the full-legth commit SHA1 and an optional suffix to
36 indicate that the working copy is dirty.
37 """
38 return _version()
39
40
41 def shortVersion():
42 """
43 A version identifier that includes the abbreviated commit SHA1 and an optional suffix to
44 indicate that the working copy is dirty.
45 """
46 return _version(shorten=True)
47
48
49 def _version(shorten=False):
50 return '-'.join(filter(None, [distVersion(),
51 currentCommit()[:7 if shorten else None],
52 ('dirty' if dirty() else None)]))
53
54
55 def distVersion():
56 """
57 The distribution version identifying a published release on PyPI.
58 """
59 from pkg_resources import parse_version
60 build_number = buildNumber()
61 parsedBaseVersion = parse_version(baseVersion)
62 if isinstance(parsedBaseVersion, tuple):
63 raise RuntimeError("Setuptools version 8.0 or newer required. Update by running "
64 "'pip install setuptools --upgrade'")
65
66 if build_number is not None and parsedBaseVersion.is_prerelease:
67 return baseVersion + '.dev' + build_number
68 else:
69 return baseVersion
70
71
72 def dockerTag():
73 """
74 The primary tag of the Docker image for the appliance. This uniquely identifies the appliance
75 image.
76 """
77 return version()
78
79
80 def dockerShortTag():
81 """
82 A secondary, shortened form of :func:`dockerTag` with which to tag the appliance image for
83 convenience.
84 """
85 return shortVersion()
86
87
88 def dockerMinimalTag():
89 """
90 A minimal tag with which to tag the appliance image for convenience. Does not include
91 information about the git commit or working copy dirtyness.
92 """
93 return distVersion()
94
95
96 dockerRegistry = 'quay.io/ucsc_cgl'
97
98 dockerName = 'toil'
99
100
101 def buildNumber():
102 """
103 The Jenkins build number, if defined, else None.
104 """
105 import os
106 return os.getenv('BUILD_NUMBER')
107
108
109 def currentCommit():
110 from subprocess import check_output
111 return check_output('git log --pretty=oneline -n 1 -- $(pwd)', shell=True).split()[0]
112
113
114 def dirty():
115 from subprocess import call
116 return 0 != call('(git diff --exit-code '
117 '&& git diff --cached --exit-code) > /dev/null', shell=True)
118
119
120 def expand_(name=None):
121 variables = {k: v for k, v in globals().items()
122 if not k.startswith('_') and not k.endswith('_')}
123
124 def resolve(k):
125 v = variables[k]
126 if callable(v):
127 v = v()
128 return v
129
130 if name is None:
131 return ''.join("%s = %s\n" % (k, repr(resolve(k))) for k, v in variables.items())
132 else:
133 return resolve(name)
134
135
136 def _main():
137 import sys
138 sys.stdout.write(expand_(*sys.argv[1:]))
139
140
141 if __name__ == '__main__':
142 _main()
143
[end of version_template.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/version_template.py b/version_template.py
--- a/version_template.py
+++ b/version_template.py
@@ -108,13 +108,21 @@
def currentCommit():
from subprocess import check_output
- return check_output('git log --pretty=oneline -n 1 -- $(pwd)', shell=True).split()[0]
+ try:
+ output = check_output('git log --pretty=oneline -n 1 -- $(pwd)', shell=True).split()[0]
+ except:
+ # Return this we are not in a git environment.
+ return '000'
+ return output
def dirty():
from subprocess import call
- return 0 != call('(git diff --exit-code '
- '&& git diff --cached --exit-code) > /dev/null', shell=True)
+ try:
+ return 0 != call('(git diff --exit-code '
+ '&& git diff --cached --exit-code) > /dev/null', shell=True)
+ except:
+ return False # In case the git call fails.
def expand_(name=None):
|
{"golden_diff": "diff --git a/version_template.py b/version_template.py\n--- a/version_template.py\n+++ b/version_template.py\n@@ -108,13 +108,21 @@\n \n def currentCommit():\n from subprocess import check_output\n- return check_output('git log --pretty=oneline -n 1 -- $(pwd)', shell=True).split()[0]\n+ try:\n+ output = check_output('git log --pretty=oneline -n 1 -- $(pwd)', shell=True).split()[0]\n+ except:\n+ # Return this we are not in a git environment.\n+ return '000'\n+ return output\n \n \n def dirty():\n from subprocess import call\n- return 0 != call('(git diff --exit-code '\n- '&& git diff --cached --exit-code) > /dev/null', shell=True)\n+ try:\n+ return 0 != call('(git diff --exit-code '\n+ '&& git diff --cached --exit-code) > /dev/null', shell=True)\n+ except:\n+ return False # In case the git call fails.\n \n \n def expand_(name=None):\n", "issue": "Can't install from tarball on GitHub\n```\r\n$ pip install https://github.com/BD2KGenomics/toil/archive/3.14.0.tar.gz\r\nCollecting https://github.com/BD2KGenomics/toil/archive/3.14.0.tar.gz\r\n Downloading https://github.com/BD2KGenomics/toil/archive/3.14.0.tar.gz\r\n - 7.0MB 40.4MB/s\r\n Complete output from command python setup.py egg_info:\r\n fatal: Not a git repository (or any of the parent directories): .git\r\n Traceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/tmp/pip-l5WfLH-build/setup.py\", line 152, in <module>\r\n version = importVersion()\r\n File \"/tmp/pip-l5WfLH-build/setup.py\", line 132, in importVersion\r\n new = version_template.expand_()\r\n File \"version_template.py\", line 131, in expand_\r\n return ''.join(\"%s = %s\\n\" % (k, repr(resolve(k))) for k, v in variables.items())\r\n File \"version_template.py\", line 131, in <genexpr>\r\n return ''.join(\"%s = %s\\n\" % (k, repr(resolve(k))) for k, v in variables.items())\r\n File \"version_template.py\", line 127, in resolve\r\n v = v()\r\n File \"version_template.py\", line 85, in dockerShortTag\r\n return shortVersion()\r\n File \"version_template.py\", line 46, in shortVersion\r\n return _version(shorten=True)\r\n File \"version_template.py\", line 51, in _version\r\n currentCommit()[:7 if shorten else None],\r\n File \"version_template.py\", line 111, in currentCommit\r\n return check_output('git log --pretty=oneline -n 1 -- $(pwd)', shell=True).split()[0]\r\n File \"/usr/lib/python2.7/subprocess.py\", line 223, in check_output\r\n raise CalledProcessError(retcode, cmd, output=output)\r\n subprocess.CalledProcessError: Command 'git log --pretty=oneline -n 1 -- $(pwd)' returned non-zero exit status 128\r\n \r\n ----------------------------------------\r\nCommand \"python setup.py egg_info\" failed with error code 1 in /tmp/pip-l5WfLH-build/\r\n```\n", "before_files": [{"content": "# Copyright (C) 2015-2016 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This script is a template for src/toil/version.py. Running it without arguments echoes all\nglobals, i.e. 
module attributes. Constant assignments will be echoed verbatim while callables\nwill be invoked and their result echoed as an assignment using the function name as the left-hand\nside and the return value of the function as right-hand side. To prevent a module attribute from\nbeing echoed, start or end the attribute name with an underscore. To print the value of a single\nsymbol, pass the name of that attribute to the script as a command line argument. You can also\nimport the expand_ function and invoke it directly with either no or exactly one argument.\"\"\"\n\n# Note to maintainers:\n#\n# - don't import at module level unless you want the imported value to be included in the output\n# - only import from the Python standard run-time library (you can't have any dependencies)\n\nbaseVersion = '3.15.0a1'\n\ncgcloudVersion = '1.6.0a1.dev393'\n\n\ndef version():\n \"\"\"\n A version identifier that includes the full-legth commit SHA1 and an optional suffix to\n indicate that the working copy is dirty.\n \"\"\"\n return _version()\n\n\ndef shortVersion():\n \"\"\"\n A version identifier that includes the abbreviated commit SHA1 and an optional suffix to\n indicate that the working copy is dirty.\n \"\"\"\n return _version(shorten=True)\n\n\ndef _version(shorten=False):\n return '-'.join(filter(None, [distVersion(),\n currentCommit()[:7 if shorten else None],\n ('dirty' if dirty() else None)]))\n\n\ndef distVersion():\n \"\"\"\n The distribution version identifying a published release on PyPI.\n \"\"\"\n from pkg_resources import parse_version\n build_number = buildNumber()\n parsedBaseVersion = parse_version(baseVersion)\n if isinstance(parsedBaseVersion, tuple):\n raise RuntimeError(\"Setuptools version 8.0 or newer required. Update by running \"\n \"'pip install setuptools --upgrade'\")\n\n if build_number is not None and parsedBaseVersion.is_prerelease:\n return baseVersion + '.dev' + build_number\n else:\n return baseVersion\n\n\ndef dockerTag():\n \"\"\"\n The primary tag of the Docker image for the appliance. This uniquely identifies the appliance\n image.\n \"\"\"\n return version()\n\n\ndef dockerShortTag():\n \"\"\"\n A secondary, shortened form of :func:`dockerTag` with which to tag the appliance image for\n convenience.\n \"\"\"\n return shortVersion()\n\n\ndef dockerMinimalTag():\n \"\"\"\n A minimal tag with which to tag the appliance image for convenience. Does not include\n information about the git commit or working copy dirtyness.\n \"\"\"\n return distVersion()\n\n\ndockerRegistry = 'quay.io/ucsc_cgl'\n\ndockerName = 'toil'\n\n\ndef buildNumber():\n \"\"\"\n The Jenkins build number, if defined, else None.\n \"\"\"\n import os\n return os.getenv('BUILD_NUMBER')\n\n\ndef currentCommit():\n from subprocess import check_output\n return check_output('git log --pretty=oneline -n 1 -- $(pwd)', shell=True).split()[0]\n\n\ndef dirty():\n from subprocess import call\n return 0 != call('(git diff --exit-code '\n '&& git diff --cached --exit-code) > /dev/null', shell=True)\n\n\ndef expand_(name=None):\n variables = {k: v for k, v in globals().items()\n if not k.startswith('_') and not k.endswith('_')}\n\n def resolve(k):\n v = variables[k]\n if callable(v):\n v = v()\n return v\n\n if name is None:\n return ''.join(\"%s = %s\\n\" % (k, repr(resolve(k))) for k, v in variables.items())\n else:\n return resolve(name)\n\n\ndef _main():\n import sys\n sys.stdout.write(expand_(*sys.argv[1:]))\n\n\nif __name__ == '__main__':\n _main()\n", "path": "version_template.py"}]}
| 2,418 | 248 |
gh_patches_debug_15911
|
rasdani/github-patches
|
git_diff
|
pallets__click-2599
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
deprecate `__version__` attribute
The `__version__` attribute is an old pattern from early in Python packaging. Setuptools eventually made it easier to use the pattern by allowing reading the value from the attribute at build time, and some other build backends have done the same.
However, there's no reason to expose this directly in code anymore. It's usually easier to use feature detection (`hasattr`, `try/except`) instead. `importlib.metadata.version("click")` can be used to get the version at runtime in a standard way, if it's really needed.
</issue>
<code>
[start of src/click/__init__.py]
1 """
2 Click is a simple Python module inspired by the stdlib optparse to make
3 writing command line scripts fun. Unlike other modules, it's based
4 around a simple API that does not come with too much magic and is
5 composable.
6 """
7 from __future__ import annotations
8
9 from .core import Argument as Argument
10 from .core import Command as Command
11 from .core import CommandCollection as CommandCollection
12 from .core import Context as Context
13 from .core import Group as Group
14 from .core import Option as Option
15 from .core import Parameter as Parameter
16 from .decorators import argument as argument
17 from .decorators import command as command
18 from .decorators import confirmation_option as confirmation_option
19 from .decorators import group as group
20 from .decorators import help_option as help_option
21 from .decorators import make_pass_decorator as make_pass_decorator
22 from .decorators import option as option
23 from .decorators import pass_context as pass_context
24 from .decorators import pass_obj as pass_obj
25 from .decorators import password_option as password_option
26 from .decorators import version_option as version_option
27 from .exceptions import Abort as Abort
28 from .exceptions import BadArgumentUsage as BadArgumentUsage
29 from .exceptions import BadOptionUsage as BadOptionUsage
30 from .exceptions import BadParameter as BadParameter
31 from .exceptions import ClickException as ClickException
32 from .exceptions import FileError as FileError
33 from .exceptions import MissingParameter as MissingParameter
34 from .exceptions import NoSuchOption as NoSuchOption
35 from .exceptions import UsageError as UsageError
36 from .formatting import HelpFormatter as HelpFormatter
37 from .formatting import wrap_text as wrap_text
38 from .globals import get_current_context as get_current_context
39 from .termui import clear as clear
40 from .termui import confirm as confirm
41 from .termui import echo_via_pager as echo_via_pager
42 from .termui import edit as edit
43 from .termui import getchar as getchar
44 from .termui import launch as launch
45 from .termui import pause as pause
46 from .termui import progressbar as progressbar
47 from .termui import prompt as prompt
48 from .termui import secho as secho
49 from .termui import style as style
50 from .termui import unstyle as unstyle
51 from .types import BOOL as BOOL
52 from .types import Choice as Choice
53 from .types import DateTime as DateTime
54 from .types import File as File
55 from .types import FLOAT as FLOAT
56 from .types import FloatRange as FloatRange
57 from .types import INT as INT
58 from .types import IntRange as IntRange
59 from .types import ParamType as ParamType
60 from .types import Path as Path
61 from .types import STRING as STRING
62 from .types import Tuple as Tuple
63 from .types import UNPROCESSED as UNPROCESSED
64 from .types import UUID as UUID
65 from .utils import echo as echo
66 from .utils import format_filename as format_filename
67 from .utils import get_app_dir as get_app_dir
68 from .utils import get_binary_stream as get_binary_stream
69 from .utils import get_text_stream as get_text_stream
70 from .utils import open_file as open_file
71
72 __version__ = "8.2.0.dev0"
73
74
75 def __getattr__(name: str) -> object:
76 import warnings
77
78 if name == "BaseCommand":
79 from .core import _BaseCommand
80
81 warnings.warn(
82 "'BaseCommand' is deprecated and will be removed in Click 9.0. Use"
83 " 'Command' instead.",
84 DeprecationWarning,
85 stacklevel=2,
86 )
87 return _BaseCommand
88
89 if name == "MultiCommand":
90 from .core import _MultiCommand
91
92 warnings.warn(
93 "'MultiCommand' is deprecated and will be removed in Click 9.0. Use"
94 " 'Group' instead.",
95 DeprecationWarning,
96 stacklevel=2,
97 )
98 return _MultiCommand
99
100 if name == "OptionParser":
101 from .parser import _OptionParser
102
103 warnings.warn(
104 "'OptionParser' is deprecated and will be removed in Click 9.0. The"
105 " old parser is available in 'optparse'.",
106 DeprecationWarning,
107 stacklevel=2,
108 )
109 return _OptionParser
110
111 raise AttributeError(name)
112
[end of src/click/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/click/__init__.py b/src/click/__init__.py
--- a/src/click/__init__.py
+++ b/src/click/__init__.py
@@ -69,8 +69,6 @@
from .utils import get_text_stream as get_text_stream
from .utils import open_file as open_file
-__version__ = "8.2.0.dev0"
-
def __getattr__(name: str) -> object:
import warnings
@@ -108,4 +106,17 @@
)
return _OptionParser
+ if name == "__version__":
+ import importlib.metadata
+ import warnings
+
+ warnings.warn(
+ "The '__version__' attribute is deprecated and will be removed in"
+ " Click 9.1. Use feature detection or"
+ " 'importlib.metadata.version(\"click\")' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return importlib.metadata.version("click")
+
raise AttributeError(name)
|
{"golden_diff": "diff --git a/src/click/__init__.py b/src/click/__init__.py\n--- a/src/click/__init__.py\n+++ b/src/click/__init__.py\n@@ -69,8 +69,6 @@\n from .utils import get_text_stream as get_text_stream\n from .utils import open_file as open_file\n \n-__version__ = \"8.2.0.dev0\"\n-\n \n def __getattr__(name: str) -> object:\n import warnings\n@@ -108,4 +106,17 @@\n )\n return _OptionParser\n \n+ if name == \"__version__\":\n+ import importlib.metadata\n+ import warnings\n+\n+ warnings.warn(\n+ \"The '__version__' attribute is deprecated and will be removed in\"\n+ \" Click 9.1. Use feature detection or\"\n+ \" 'importlib.metadata.version(\\\"click\\\")' instead.\",\n+ DeprecationWarning,\n+ stacklevel=2,\n+ )\n+ return importlib.metadata.version(\"click\")\n+\n raise AttributeError(name)\n", "issue": "deprecate `__version__` attribute\nThe `__version__` attribute is an old pattern from early in Python packaging. Setuptools eventually made it easier to use the pattern by allowing reading the value from the attribute at build time, and some other build backends have done the same.\r\n\r\nHowever, there's no reason to expose this directly in code anymore. It's usually easier to use feature detection (`hasattr`, `try/except`) instead. `importlib.metadata.version(\"click\")` can be used to get the version at runtime in a standard way, if it's really needed.\n", "before_files": [{"content": "\"\"\"\nClick is a simple Python module inspired by the stdlib optparse to make\nwriting command line scripts fun. Unlike other modules, it's based\naround a simple API that does not come with too much magic and is\ncomposable.\n\"\"\"\nfrom __future__ import annotations\n\nfrom .core import Argument as Argument\nfrom .core import Command as Command\nfrom .core import CommandCollection as CommandCollection\nfrom .core import Context as Context\nfrom .core import Group as Group\nfrom .core import Option as Option\nfrom .core import Parameter as Parameter\nfrom .decorators import argument as argument\nfrom .decorators import command as command\nfrom .decorators import confirmation_option as confirmation_option\nfrom .decorators import group as group\nfrom .decorators import help_option as help_option\nfrom .decorators import make_pass_decorator as make_pass_decorator\nfrom .decorators import option as option\nfrom .decorators import pass_context as pass_context\nfrom .decorators import pass_obj as pass_obj\nfrom .decorators import password_option as password_option\nfrom .decorators import version_option as version_option\nfrom .exceptions import Abort as Abort\nfrom .exceptions import BadArgumentUsage as BadArgumentUsage\nfrom .exceptions import BadOptionUsage as BadOptionUsage\nfrom .exceptions import BadParameter as BadParameter\nfrom .exceptions import ClickException as ClickException\nfrom .exceptions import FileError as FileError\nfrom .exceptions import MissingParameter as MissingParameter\nfrom .exceptions import NoSuchOption as NoSuchOption\nfrom .exceptions import UsageError as UsageError\nfrom .formatting import HelpFormatter as HelpFormatter\nfrom .formatting import wrap_text as wrap_text\nfrom .globals import get_current_context as get_current_context\nfrom .termui import clear as clear\nfrom .termui import confirm as confirm\nfrom .termui import echo_via_pager as echo_via_pager\nfrom .termui import edit as edit\nfrom .termui import getchar as getchar\nfrom .termui import launch as launch\nfrom .termui import pause as pause\nfrom .termui import progressbar as progressbar\nfrom .termui 
import prompt as prompt\nfrom .termui import secho as secho\nfrom .termui import style as style\nfrom .termui import unstyle as unstyle\nfrom .types import BOOL as BOOL\nfrom .types import Choice as Choice\nfrom .types import DateTime as DateTime\nfrom .types import File as File\nfrom .types import FLOAT as FLOAT\nfrom .types import FloatRange as FloatRange\nfrom .types import INT as INT\nfrom .types import IntRange as IntRange\nfrom .types import ParamType as ParamType\nfrom .types import Path as Path\nfrom .types import STRING as STRING\nfrom .types import Tuple as Tuple\nfrom .types import UNPROCESSED as UNPROCESSED\nfrom .types import UUID as UUID\nfrom .utils import echo as echo\nfrom .utils import format_filename as format_filename\nfrom .utils import get_app_dir as get_app_dir\nfrom .utils import get_binary_stream as get_binary_stream\nfrom .utils import get_text_stream as get_text_stream\nfrom .utils import open_file as open_file\n\n__version__ = \"8.2.0.dev0\"\n\n\ndef __getattr__(name: str) -> object:\n import warnings\n\n if name == \"BaseCommand\":\n from .core import _BaseCommand\n\n warnings.warn(\n \"'BaseCommand' is deprecated and will be removed in Click 9.0. Use\"\n \" 'Command' instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return _BaseCommand\n\n if name == \"MultiCommand\":\n from .core import _MultiCommand\n\n warnings.warn(\n \"'MultiCommand' is deprecated and will be removed in Click 9.0. Use\"\n \" 'Group' instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return _MultiCommand\n\n if name == \"OptionParser\":\n from .parser import _OptionParser\n\n warnings.warn(\n \"'OptionParser' is deprecated and will be removed in Click 9.0. The\"\n \" old parser is available in 'optparse'.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return _OptionParser\n\n raise AttributeError(name)\n", "path": "src/click/__init__.py"}]}
| 1,796 | 232 |
gh_patches_debug_289
|
rasdani/github-patches
|
git_diff
|
iterative__dvc-2282
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
test: s3: use moto to test multipart objects
Currently, we are unable to use it because of [this bug](https://github.com/spulec/moto/issues/2154). When it is fixed, we should switch to it from using actual s3 for unit testing. Related to https://github.com/iterative/dvc/pull/1867
</issue>
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2 from setuptools.command.build_py import build_py as _build_py
3 import os
4 import sys
5
6
7 # https://packaging.python.org/guides/single-sourcing-package-version/
8 pkg_dir = os.path.dirname(__file__)
9
10 # This will define __version__ implicitly
11 with open(os.path.join(pkg_dir, "dvc", "version.py")) as fobj:
12 exec(fobj.read())
13
14 version = __version__ # noqa: F821
15
16
17 # To achieve consistency between the build version and the one provided
18 # by your package during runtime, you need to **pin** the build version.
19 #
20 # This custom class will replace the version.py module with a **static**
21 # `__version__` that your package can read at runtime, assuring consistancy.
22 #
23 # References:
24 # - https://docs.python.org/3.7/distutils/extending.html
25 # - https://github.com/python/mypy
26 class build_py(_build_py):
27 def pin_version(self):
28 path = os.path.join(self.build_lib, "dvc")
29 self.mkpath(path)
30 with open(os.path.join(path, "version.py"), "w") as fobj:
31 fobj.write("# AUTOGENERATED at build time by setup.py\n")
32 fobj.write('__version__ = "{}"\n'.format(version))
33
34 def run(self):
35 self.execute(self.pin_version, ())
36 _build_py.run(self)
37
38
39 install_requires = [
40 "ply>=3.9", # See https://github.com/pyinstaller/pyinstaller/issues/1945
41 "configparser>=3.5.0",
42 "zc.lockfile>=1.2.1",
43 "future>=0.16.0",
44 "colorama>=0.3.9",
45 "configobj>=5.0.6",
46 "gitpython>=2.1.8",
47 "setuptools>=34.0.0",
48 "nanotime>=0.5.2",
49 "pyasn1>=0.4.1",
50 "schema>=0.6.7",
51 "jsonpath-ng>=1.4.3",
52 "requests>=2.22.0",
53 "grandalf==0.6",
54 "asciimatics>=1.10.0",
55 "distro>=1.3.0",
56 "appdirs>=1.4.3",
57 "treelib>=1.5.5",
58 "inflect>=2.1.0",
59 "humanize>=0.5.1",
60 "ruamel.yaml>=0.15.91",
61 "psutil==5.6.2",
62 "funcy>=1.12",
63 "pathspec>=0.5.9",
64 "shortuuid>=0.5.0",
65 "win-unicode-console>=0.5; sys_platform == 'win32'",
66 ]
67
68 if sys.version_info[0] == 2:
69 install_requires.append("networkx>=2.1,<2.3")
70 else:
71 install_requires.append("networkx>=2.1")
72
73 # Extra dependencies for remote integrations
74 gs = ["google-cloud-storage==1.13.0"]
75 s3 = ["boto3==1.9.115"]
76 azure = ["azure-storage-blob==2.0.1"]
77 oss = ["oss2==2.6.1"]
78 ssh = ["paramiko>=2.5.0"]
79 all_remotes = gs + s3 + azure + ssh + oss
80
81 # Extra dependecies to run tests
82 tests_requirements = [
83 "PyInstaller==3.4",
84 "wheel>=0.31.1",
85 "pydot>=1.2.4",
86 # Test requirements:
87 "pytest>=4.6.0",
88 "pytest-timeout>=1.3.3",
89 "pytest-cov>=2.6.1",
90 "pytest-xdist>=1.26.1",
91 "pytest-mock>=1.10.4",
92 "flaky>=3.5.3",
93 "mock>=3.0.0",
94 "xmltodict>=0.11.0",
95 "awscli>=1.16.125",
96 "google-compute-engine",
97 "pywin32; sys_platform == 'win32'",
98 "Pygments", # required by collective.checkdocs,
99 "collective.checkdocs",
100 "flake8",
101 "flake8-docstrings",
102 "pydocstyle<4.0",
103 "jaraco.windows==3.9.2",
104 "mock-ssh-server>=0.5.0",
105 ]
106
107 if (sys.version_info) >= (3, 6):
108 tests_requirements.append("black==19.3b0")
109
110 setup(
111 name="dvc",
112 version=version,
113 description="Git for data scientists - manage your code and data together",
114 long_description=open("README.rst", "r").read(),
115 author="Dmitry Petrov",
116 author_email="[email protected]",
117 download_url="https://github.com/iterative/dvc",
118 license="Apache License 2.0",
119 install_requires=install_requires,
120 extras_require={
121 "all": all_remotes,
122 "gs": gs,
123 "s3": s3,
124 "azure": azure,
125 "oss": oss,
126 "ssh": ssh,
127 # NOTE: https://github.com/inveniosoftware/troubleshooting/issues/1
128 ":python_version=='2.7'": ["futures", "pathlib2"],
129 "tests": tests_requirements,
130 },
131 keywords="data science, data version control, machine learning",
132 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*",
133 classifiers=[
134 "Development Status :: 4 - Beta",
135 "Programming Language :: Python :: 2",
136 "Programming Language :: Python :: 2.7",
137 "Programming Language :: Python :: 3",
138 "Programming Language :: Python :: 3.5",
139 "Programming Language :: Python :: 3.6",
140 "Programming Language :: Python :: 3.7",
141 ],
142 packages=find_packages(exclude=["tests"]),
143 include_package_data=True,
144 url="http://dataversioncontrol.com",
145 entry_points={"console_scripts": ["dvc = dvc.main:main"]},
146 cmdclass={"build_py": build_py},
147 zip_safe=False,
148 )
149
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -102,6 +102,7 @@
"pydocstyle<4.0",
"jaraco.windows==3.9.2",
"mock-ssh-server>=0.5.0",
+ "moto",
]
if (sys.version_info) >= (3, 6):
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -102,6 +102,7 @@\n \"pydocstyle<4.0\",\n \"jaraco.windows==3.9.2\",\n \"mock-ssh-server>=0.5.0\",\n+ \"moto\",\n ]\n \n if (sys.version_info) >= (3, 6):\n", "issue": "test: s3: use moto to test multipart objects\nCurrently, we are unable to use it because of [this bug](https://github.com/spulec/moto/issues/2154). When it is fixed, we should switch to it from using actual s3 for unit testing. Related to https://github.com/iterative/dvc/pull/1867\r\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom setuptools.command.build_py import build_py as _build_py\nimport os\nimport sys\n\n\n# https://packaging.python.org/guides/single-sourcing-package-version/\npkg_dir = os.path.dirname(__file__)\n\n# This will define __version__ implicitly\nwith open(os.path.join(pkg_dir, \"dvc\", \"version.py\")) as fobj:\n exec(fobj.read())\n\nversion = __version__ # noqa: F821\n\n\n# To achieve consistency between the build version and the one provided\n# by your package during runtime, you need to **pin** the build version.\n#\n# This custom class will replace the version.py module with a **static**\n# `__version__` that your package can read at runtime, assuring consistancy.\n#\n# References:\n# - https://docs.python.org/3.7/distutils/extending.html\n# - https://github.com/python/mypy\nclass build_py(_build_py):\n def pin_version(self):\n path = os.path.join(self.build_lib, \"dvc\")\n self.mkpath(path)\n with open(os.path.join(path, \"version.py\"), \"w\") as fobj:\n fobj.write(\"# AUTOGENERATED at build time by setup.py\\n\")\n fobj.write('__version__ = \"{}\"\\n'.format(version))\n\n def run(self):\n self.execute(self.pin_version, ())\n _build_py.run(self)\n\n\ninstall_requires = [\n \"ply>=3.9\", # See https://github.com/pyinstaller/pyinstaller/issues/1945\n \"configparser>=3.5.0\",\n \"zc.lockfile>=1.2.1\",\n \"future>=0.16.0\",\n \"colorama>=0.3.9\",\n \"configobj>=5.0.6\",\n \"gitpython>=2.1.8\",\n \"setuptools>=34.0.0\",\n \"nanotime>=0.5.2\",\n \"pyasn1>=0.4.1\",\n \"schema>=0.6.7\",\n \"jsonpath-ng>=1.4.3\",\n \"requests>=2.22.0\",\n \"grandalf==0.6\",\n \"asciimatics>=1.10.0\",\n \"distro>=1.3.0\",\n \"appdirs>=1.4.3\",\n \"treelib>=1.5.5\",\n \"inflect>=2.1.0\",\n \"humanize>=0.5.1\",\n \"ruamel.yaml>=0.15.91\",\n \"psutil==5.6.2\",\n \"funcy>=1.12\",\n \"pathspec>=0.5.9\",\n \"shortuuid>=0.5.0\",\n \"win-unicode-console>=0.5; sys_platform == 'win32'\",\n]\n\nif sys.version_info[0] == 2:\n install_requires.append(\"networkx>=2.1,<2.3\")\nelse:\n install_requires.append(\"networkx>=2.1\")\n\n# Extra dependencies for remote integrations\ngs = [\"google-cloud-storage==1.13.0\"]\ns3 = [\"boto3==1.9.115\"]\nazure = [\"azure-storage-blob==2.0.1\"]\noss = [\"oss2==2.6.1\"]\nssh = [\"paramiko>=2.5.0\"]\nall_remotes = gs + s3 + azure + ssh + oss\n\n# Extra dependecies to run tests\ntests_requirements = [\n \"PyInstaller==3.4\",\n \"wheel>=0.31.1\",\n \"pydot>=1.2.4\",\n # Test requirements:\n \"pytest>=4.6.0\",\n \"pytest-timeout>=1.3.3\",\n \"pytest-cov>=2.6.1\",\n \"pytest-xdist>=1.26.1\",\n \"pytest-mock>=1.10.4\",\n \"flaky>=3.5.3\",\n \"mock>=3.0.0\",\n \"xmltodict>=0.11.0\",\n \"awscli>=1.16.125\",\n \"google-compute-engine\",\n \"pywin32; sys_platform == 'win32'\",\n \"Pygments\", # required by collective.checkdocs,\n \"collective.checkdocs\",\n \"flake8\",\n \"flake8-docstrings\",\n \"pydocstyle<4.0\",\n \"jaraco.windows==3.9.2\",\n \"mock-ssh-server>=0.5.0\",\n]\n\nif 
(sys.version_info) >= (3, 6):\n tests_requirements.append(\"black==19.3b0\")\n\nsetup(\n name=\"dvc\",\n version=version,\n description=\"Git for data scientists - manage your code and data together\",\n long_description=open(\"README.rst\", \"r\").read(),\n author=\"Dmitry Petrov\",\n author_email=\"[email protected]\",\n download_url=\"https://github.com/iterative/dvc\",\n license=\"Apache License 2.0\",\n install_requires=install_requires,\n extras_require={\n \"all\": all_remotes,\n \"gs\": gs,\n \"s3\": s3,\n \"azure\": azure,\n \"oss\": oss,\n \"ssh\": ssh,\n # NOTE: https://github.com/inveniosoftware/troubleshooting/issues/1\n \":python_version=='2.7'\": [\"futures\", \"pathlib2\"],\n \"tests\": tests_requirements,\n },\n keywords=\"data science, data version control, machine learning\",\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(exclude=[\"tests\"]),\n include_package_data=True,\n url=\"http://dataversioncontrol.com\",\n entry_points={\"console_scripts\": [\"dvc = dvc.main:main\"]},\n cmdclass={\"build_py\": build_py},\n zip_safe=False,\n)\n", "path": "setup.py"}]}
| 2,369 | 90 |
gh_patches_debug_1752
|
rasdani/github-patches
|
git_diff
|
spack__spack-4809
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
problem with xproto
the xproto could be compiled properly, but while installing, I come across the following problem
```
==> 'make' '-j8'
/Applications/Xcode.app/Contents/Developer/usr/bin/make all-recursive
Making all in specs
Making all in SIAddresses
make[3]: Nothing to be done for `all'.
make[3]: Nothing to be done for `all-am'.
make[2]: Nothing to be done for `all-am'.
==> 'make' '-j8' 'install'
Making install in specs
Making install in SIAddresses
make[3]: Nothing to be done for `install-exec-am'.
make[3]: Nothing to be done for `install-data-am'.
make[3]: Nothing to be done for `install-exec-am'.
/spack/var/spack/stage/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/xproto-7.0.31/install-sh -c -d '/spack/opt/spack/darwin-sierra-x86_64/clang-3.9.0/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/share/doc/xproto'
/spack/var/spack/stage/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/xproto-7.0.31/install-sh -c -d '/spack/opt/spack/darwin-sierra-x86_64/clang-3.9.0/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/share/doc/xproto'
mkdir: /spack/opt/spack/darwin-sierra-x86_64/clang-3.9.0/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/share: File exists
mkdir: /spack/opt/spack/darwin-sierra-x86_64/clang-3.9.0/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/share/doc: File exists
mkdir: /spack/opt/spack/darwin-sierra-x86_64/clang-3.9.0/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/share/doc/xproto: File exists
make[3]: *** [install-dist_shelfDATA] Error 1
make[3]: *** Waiting for unfinished jobs....
make[2]: *** [install-am] Error 2
make[1]: *** [install-recursive] Error 1
make: *** [install-recursive] Error 1
```
</issue>
<code>
[start of var/spack/repos/builtin/packages/xproto/package.py]
1 ##############################################################################
2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
3 # Produced at the Lawrence Livermore National Laboratory.
4 #
5 # This file is part of Spack.
6 # Created by Todd Gamblin, [email protected], All rights reserved.
7 # LLNL-CODE-647188
8 #
9 # For details, see https://github.com/llnl/spack
10 # Please also see the NOTICE and LICENSE files for our notice and the LGPL.
11 #
12 # This program is free software; you can redistribute it and/or modify
13 # it under the terms of the GNU Lesser General Public License (as
14 # published by the Free Software Foundation) version 2.1, February 1999.
15 #
16 # This program is distributed in the hope that it will be useful, but
17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
19 # conditions of the GNU Lesser General Public License for more details.
20 #
21 # You should have received a copy of the GNU Lesser General Public
22 # License along with this program; if not, write to the Free Software
23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 ##############################################################################
25 from spack import *
26
27
28 class Xproto(AutotoolsPackage):
29 """X Window System Core Protocol.
30
31 This package provides the headers and specification documents defining
32 the X Window System Core Protocol, Version 11.
33
34 It also includes a number of headers that aren't purely protocol related,
35 but are depended upon by many other X Window System packages to provide
36 common definitions and porting layer."""
37
38 homepage = "http://cgit.freedesktop.org/xorg/proto/x11proto"
39 url = "https://www.x.org/archive/individual/proto/xproto-7.0.31.tar.gz"
40
41 version('7.0.31', '04b925bf9e472c80f9212615cd684f1e')
42 version('7.0.29', '16a78dd2c5ad73011105c96235f6a0af')
43
44 depends_on('[email protected]:', type='build')
45 depends_on('util-macros', type='build')
46
[end of var/spack/repos/builtin/packages/xproto/package.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/var/spack/repos/builtin/packages/xproto/package.py b/var/spack/repos/builtin/packages/xproto/package.py
--- a/var/spack/repos/builtin/packages/xproto/package.py
+++ b/var/spack/repos/builtin/packages/xproto/package.py
@@ -43,3 +43,8 @@
depends_on('[email protected]:', type='build')
depends_on('util-macros', type='build')
+
+ def install(self, spec, prefix):
+ # Installation fails in parallel
+ # See https://github.com/LLNL/spack/issues/4805
+ make('install', parallel=False)
|
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/xproto/package.py b/var/spack/repos/builtin/packages/xproto/package.py\n--- a/var/spack/repos/builtin/packages/xproto/package.py\n+++ b/var/spack/repos/builtin/packages/xproto/package.py\n@@ -43,3 +43,8 @@\n \n depends_on('[email protected]:', type='build')\n depends_on('util-macros', type='build')\n+\n+ def install(self, spec, prefix):\n+ # Installation fails in parallel\n+ # See https://github.com/LLNL/spack/issues/4805\n+ make('install', parallel=False)\n", "issue": "problem with xproto\nthe xproto could be compile properly but while installing, i come across the following problem\r\n\r\n```\r\n==> 'make' '-j8'\r\n/Applications/Xcode.app/Contents/Developer/usr/bin/make all-recursive\r\nMaking all in specs\r\nMaking all in SIAddresses\r\nmake[3]: Nothing to be done for `all'.\r\nmake[3]: Nothing to be done for `all-am'.\r\nmake[2]: Nothing to be done for `all-am'.\r\n==> 'make' '-j8' 'install'\r\nMaking install in specs\r\nMaking install in SIAddresses\r\nmake[3]: Nothing to be done for `install-exec-am'.\r\nmake[3]: Nothing to be done for `install-data-am'.\r\nmake[3]: Nothing to be done for `install-exec-am'.\r\n /spack/var/spack/stage/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/xproto-7.0.31/install-sh -c -d '/spack/opt/spack/darwin-sierra-x86_64/clang-3.9.0/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/share/doc/xproto'\r\n /spack/var/spack/stage/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/xproto-7.0.31/install-sh -c -d '/spack/opt/spack/darwin-sierra-x86_64/clang-3.9.0/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/share/doc/xproto'\r\nmkdir: /spack/opt/spack/darwin-sierra-x86_64/clang-3.9.0/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/share: File exists\r\nmkdir: /spack/opt/spack/darwin-sierra-x86_64/clang-3.9.0/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/share/doc: File exists\r\nmkdir: /spack/opt/spack/darwin-sierra-x86_64/clang-3.9.0/xproto-7.0.31-rgclwhmbundqudnhjaavmnvuzceqlgsh/share/doc/xproto: File exists\r\nmake[3]: *** [install-dist_shelfDATA] Error 1\r\nmake[3]: *** Waiting for unfinished jobs....\r\nmake[2]: *** [install-am] Error 2\r\nmake[1]: *** [install-recursive] Error 1\r\nmake: *** [install-recursive] Error 1\r\n```\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the NOTICE and LICENSE files for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\n\n\nclass Xproto(AutotoolsPackage):\n \"\"\"X Window System Core Protocol.\n\n This package provides the headers and specification documents defining\n the X Window System Core Protocol, Version 11.\n\n It also includes a number of headers that aren't purely protocol related,\n but are depended upon by many other X Window System packages to provide\n common definitions and porting layer.\"\"\"\n\n homepage = \"http://cgit.freedesktop.org/xorg/proto/x11proto\"\n url = \"https://www.x.org/archive/individual/proto/xproto-7.0.31.tar.gz\"\n\n version('7.0.31', '04b925bf9e472c80f9212615cd684f1e')\n version('7.0.29', '16a78dd2c5ad73011105c96235f6a0af')\n\n depends_on('[email protected]:', type='build')\n depends_on('util-macros', type='build')\n", "path": "var/spack/repos/builtin/packages/xproto/package.py"}]}
| 1,804 | 147 |
gh_patches_debug_932
|
rasdani/github-patches
|
git_diff
|
praw-dev__praw-1304
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Sphinx stops emitting warnings if it encounters only one
**Describe the bug**
<!-- A clear and concise description of what the bug is. --> When running pre_push, if Sphinx runs into a warning, it does not print any more. When there are lots of warnings, it takes a lot of time to re-run pre_push per warning.
I recommend adding the command line argument `--keep-going`. This will cause it to print all warnings.
**System Info**
- PRAW Version: Latest
</issue>
<code>
[start of pre_push.py]
1 #!/usr/bin/env python3
2 """Run static analysis on the project."""
3
4 import argparse
5 import sys
6 from os import path
7 from shutil import rmtree
8 from subprocess import CalledProcessError, check_call
9 from tempfile import mkdtemp
10
11 current_directory = path.abspath(path.join(__file__, ".."))
12
13
14 def do_process(args, shell=False):
15 """Run program provided by args.
16
17 Return True on success.
18
19 Output failed message on non-zero exit and return False.
20
21 Exit if command is not found.
22 """
23 print("Running: {}".format(" ".join(args)))
24 try:
25 check_call(args, shell=shell)
26 except CalledProcessError:
27 print("\nFailed: {}".format(" ".join(args)))
28 return False
29 except Exception as exc:
30 sys.stderr.write(str(exc) + "\n")
31 sys.exit(1)
32 return True
33
34
35 def run_static():
36 """Runs the static tests.
37
38 Returns a statuscode of 0 if everything ran correctly.
39 Otherwise, it will return statuscode 1
40 """
41 success = True
42 success &= do_process(
43 [
44 sys.executable,
45 path.join(current_directory, "tools", "static_word_checks.py"),
46 "--replace",
47 ]
48 )
49 success &= do_process(["black ."], shell=True)
50 success &= do_process(["flake8", "--exclude=.eggs,build,docs"])
51 success &= do_process(["pydocstyle", "praw"])
52 # success &= do_process(["pylint", "--rcfile=.pylintrc", "praw"])
53
54 tmp_dir = mkdtemp()
55 try:
56 success &= do_process(["sphinx-build", "-W", "docs", tmp_dir])
57 finally:
58 rmtree(tmp_dir)
59
60 return success
61
62
63 def run_unit():
64 """Runs the unit-tests.
65
66 Follows the behavior of the static tests,
67 where any failed tests cause pre_push.py to fail.
68 """
69 return do_process(
70 [sys.executable, path.join(current_directory, "setup.py"), "test"]
71 )
72
73
74 def main():
75 """Runs the main function.
76
77 usage: pre_push.py [-h] [-n] [-u] [-a]
78
79 Run static and/or unit-tests
80 """
81 parser = argparse.ArgumentParser(
82 description="Run static and/or unit-tests"
83 )
84 parser.add_argument(
85 "-n",
86 "--unstatic",
87 action="store_true",
88 help="Do not run static tests (black/flake8/pydocstyle/sphinx-build)",
89 default=False,
90 )
91 parser.add_argument(
92 "-u",
93 "--unit-tests",
94 "--unit",
95 action="store_true",
96 default=False,
97 help="Run the unit tests",
98 )
99 parser.add_argument(
100 "-a",
101 "--all",
102 action="store_true",
103 default=False,
104 help="Run all of the tests (static and unit). "
105 "Overrides the unstatic argument.",
106 )
107 args = parser.parse_args()
108 success = True
109 try:
110 if not args.unstatic or args.all:
111 success &= run_static()
112 if args.all or args.unit_tests:
113 success &= run_unit()
114 except KeyboardInterrupt:
115 return int(not False)
116 return int(not success)
117
118
119 if __name__ == "__main__":
120 exit_code = main()
121 print(
122 "\npre_push.py: Success!" if not exit_code else "\npre_push.py: Fail"
123 )
124 sys.exit(exit_code)
125
[end of pre_push.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pre_push.py b/pre_push.py
--- a/pre_push.py
+++ b/pre_push.py
@@ -53,7 +53,9 @@
tmp_dir = mkdtemp()
try:
- success &= do_process(["sphinx-build", "-W", "docs", tmp_dir])
+ success &= do_process(
+ ["sphinx-build", "-W", "--keep-going", "docs", tmp_dir]
+ )
finally:
rmtree(tmp_dir)
|
{"golden_diff": "diff --git a/pre_push.py b/pre_push.py\n--- a/pre_push.py\n+++ b/pre_push.py\n@@ -53,7 +53,9 @@\n \n tmp_dir = mkdtemp()\n try:\n- success &= do_process([\"sphinx-build\", \"-W\", \"docs\", tmp_dir])\n+ success &= do_process(\n+ [\"sphinx-build\", \"-W\", \"--keep-going\", \"docs\", tmp_dir]\n+ )\n finally:\n rmtree(tmp_dir)\n", "issue": "Sphinx stops emitting warnings if it encounters only one\n**Describe the bug**\r\n<!-- A clear and concise description of what the bug is. --> When running pre_push, if Sphinx runs into an warning, it does does print any more. When there are lots of warnings, it takes a lot of time to re-run pre_push per warning\r\n\r\nI recommend adding the command line argument `--keep-going`. This will cause it to print all warnings.\r\n\r\n**System Info**\r\n - PRAW Version: Latest\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\"\"\"Run static analysis on the project.\"\"\"\n\nimport argparse\nimport sys\nfrom os import path\nfrom shutil import rmtree\nfrom subprocess import CalledProcessError, check_call\nfrom tempfile import mkdtemp\n\ncurrent_directory = path.abspath(path.join(__file__, \"..\"))\n\n\ndef do_process(args, shell=False):\n \"\"\"Run program provided by args.\n\n Return True on success.\n\n Output failed message on non-zero exit and return False.\n\n Exit if command is not found.\n \"\"\"\n print(\"Running: {}\".format(\" \".join(args)))\n try:\n check_call(args, shell=shell)\n except CalledProcessError:\n print(\"\\nFailed: {}\".format(\" \".join(args)))\n return False\n except Exception as exc:\n sys.stderr.write(str(exc) + \"\\n\")\n sys.exit(1)\n return True\n\n\ndef run_static():\n \"\"\"Runs the static tests.\n\n Returns a statuscode of 0 if everything ran correctly.\n Otherwise, it will return statuscode 1\n \"\"\"\n success = True\n success &= do_process(\n [\n sys.executable,\n path.join(current_directory, \"tools\", \"static_word_checks.py\"),\n \"--replace\",\n ]\n )\n success &= do_process([\"black .\"], shell=True)\n success &= do_process([\"flake8\", \"--exclude=.eggs,build,docs\"])\n success &= do_process([\"pydocstyle\", \"praw\"])\n # success &= do_process([\"pylint\", \"--rcfile=.pylintrc\", \"praw\"])\n\n tmp_dir = mkdtemp()\n try:\n success &= do_process([\"sphinx-build\", \"-W\", \"docs\", tmp_dir])\n finally:\n rmtree(tmp_dir)\n\n return success\n\n\ndef run_unit():\n \"\"\"Runs the unit-tests.\n\n Follows the behavior of the static tests,\n where any failed tests cause pre_push.py to fail.\n \"\"\"\n return do_process(\n [sys.executable, path.join(current_directory, \"setup.py\"), \"test\"]\n )\n\n\ndef main():\n \"\"\"Runs the main function.\n\n usage: pre_push.py [-h] [-n] [-u] [-a]\n\n Run static and/or unit-tests\n \"\"\"\n parser = argparse.ArgumentParser(\n description=\"Run static and/or unit-tests\"\n )\n parser.add_argument(\n \"-n\",\n \"--unstatic\",\n action=\"store_true\",\n help=\"Do not run static tests (black/flake8/pydocstyle/sphinx-build)\",\n default=False,\n )\n parser.add_argument(\n \"-u\",\n \"--unit-tests\",\n \"--unit\",\n action=\"store_true\",\n default=False,\n help=\"Run the unit tests\",\n )\n parser.add_argument(\n \"-a\",\n \"--all\",\n action=\"store_true\",\n default=False,\n help=\"Run all of the tests (static and unit). 
\"\n \"Overrides the unstatic argument.\",\n )\n args = parser.parse_args()\n success = True\n try:\n if not args.unstatic or args.all:\n success &= run_static()\n if args.all or args.unit_tests:\n success &= run_unit()\n except KeyboardInterrupt:\n return int(not False)\n return int(not success)\n\n\nif __name__ == \"__main__\":\n exit_code = main()\n print(\n \"\\npre_push.py: Success!\" if not exit_code else \"\\npre_push.py: Fail\"\n )\n sys.exit(exit_code)\n", "path": "pre_push.py"}]}
| 1,646 | 107 |
gh_patches_debug_4208
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-1363
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to set number_format to null for Money type
## Reproduce
1. Set up a Money column.
1. Submit a `PATCH` request to the columns API, e.g. `/api/db/v0/tables/15/columns/52/`
1. Send:
```json
{
"type": "MATHESAR_TYPES.MATHESAR_MONEY",
"display_options": {
"currency_symbol": "$",
"currency_symbol_location": "after-minus",
"number_format": "english"
}
}
```
Receive success. Good.
1. Now change `display_options.number_format` to `null`, sending:
```json
{
"type": "MATHESAR_TYPES.MATHESAR_MONEY",
"display_options": {
"currency_symbol": "$",
"currency_symbol_location": "after-minus",
"number_format": null
}
}
```
Expect success.
Receive:
```json
[
{
"code": 2024,
"field": "number_format",
"message": "This field may not be null.",
"detail": {}
}
]
```
</issue>
<code>
[start of mathesar/api/serializers/shared_serializers.py]
1 from django.core.exceptions import ImproperlyConfigured
2 from rest_framework import serializers
3
4 from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin
5 from mathesar.database.types import MathesarTypeIdentifier, get_mathesar_type_from_db_type
6
7
8 class ReadOnlyPolymorphicSerializerMappingMixin:
9 """
10 This serializer mixin is helpful in serializing polymorphic models,
11 by switching to correct serializer based on the mapping field value.
12 """
13
14 def __new__(cls, *args, **kwargs):
15 if cls.serializers_mapping is None:
16 raise ImproperlyConfigured(
17 '`{cls}` is missing a '
18 '`{cls}.model_serializer_mapping` attribute'.format(cls=cls.__name__)
19 )
20 return super().__new__(cls, *args, **kwargs)
21
22 def __init__(self, *args, **kwargs):
23 super().__init__(*args, **kwargs)
24 self.serializers_cls_mapping = {}
25 serializers_mapping = self.serializers_mapping
26 self.serializers_mapping = {}
27 for identifier, serializer_cls in serializers_mapping.items():
28 if callable(serializer_cls):
29 serializer = serializer_cls(*args, **kwargs)
30 serializer.parent = self
31 else:
32 serializer = serializer_cls
33 self.serializers_mapping[identifier] = serializer
34 self.serializers_cls_mapping[identifier] = serializer_cls
35
36 def to_representation(self, instance):
37 serializer = self.serializers_mapping.get(self.get_mapping_field(), None)
38 if serializer is not None:
39 return serializer.to_representation(instance)
40 else:
41 return instance
42
43 def get_mapping_field(self):
44 mapping_field = getattr(self, "mapping_field", None)
45 if mapping_field is None:
46 raise Exception(
47 "Add a `mapping_field` to be used as a identifier"
48 "or override this method to return a identifier to identify a proper serializer"
49 )
50 return mapping_field
51
52
53 class ReadWritePolymorphicSerializerMappingMixin(ReadOnlyPolymorphicSerializerMappingMixin):
54 def to_internal_value(self, data):
55 serializer = self.serializers_mapping.get(self.get_mapping_field())
56 if serializer is not None:
57 return serializer.to_internal_value(data=data)
58 else:
59 data = {}
60 return data
61
62
63 class MonkeyPatchPartial:
64 """
65 Work around bug #3847 in djangorestframework by monkey-patching the partial
66 attribute of the root serializer during the call to validate_empty_values.
67 https://github.com/encode/django-rest-framework/issues/3847
68 """
69
70 def __init__(self, root):
71 self._root = root
72
73 def __enter__(self):
74 self._old = getattr(self._root, 'partial')
75 setattr(self._root, 'partial', False)
76
77 def __exit__(self, *args):
78 setattr(self._root, 'partial', self._old)
79
80
81 class OverrideRootPartialMixin:
82 """
83 This mixin is used to convert a serializer into a partial serializer,
84 based on the serializer `partial` property rather than the parent's `partial` property.
85 Refer to the issue
86 https://github.com/encode/django-rest-framework/issues/3847
87 """
88
89 def run_validation(self, *args, **kwargs):
90 if not self.partial:
91 with MonkeyPatchPartial(self.root):
92 return super().run_validation(*args, **kwargs)
93 return super().run_validation(*args, **kwargs)
94
95
96 class MathesarPolymorphicErrorMixin(MathesarErrorMessageMixin):
97 def get_serializer_fields(self):
98 return self.serializers_mapping[self.get_mapping_field()].fields
99
100
101 class CustomBooleanLabelSerializer(MathesarErrorMessageMixin, serializers.Serializer):
102 TRUE = serializers.CharField()
103 FALSE = serializers.CharField()
104
105
106 DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY = 'db_type'
107
108
109 class BooleanDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):
110 input = serializers.ChoiceField(choices=[("dropdown", "dropdown"), ("checkbox", "checkbox")])
111 custom_labels = CustomBooleanLabelSerializer(required=False)
112
113
114 class AbstractNumberDisplayOptionSerializer(serializers.Serializer):
115 number_format = serializers.ChoiceField(required=False, choices=['english', 'german', 'french', 'hindi', 'swiss'])
116
117
118 class NumberDisplayOptionSerializer(
119 MathesarErrorMessageMixin,
120 OverrideRootPartialMixin,
121 AbstractNumberDisplayOptionSerializer
122 ):
123 show_as_percentage = serializers.BooleanField(default=False)
124
125
126 class MoneyDisplayOptionSerializer(
127 MathesarErrorMessageMixin,
128 OverrideRootPartialMixin,
129 AbstractNumberDisplayOptionSerializer
130 ):
131 currency_symbol = serializers.CharField()
132 currency_symbol_location = serializers.ChoiceField(choices=['after-minus', 'end-with-space'])
133
134
135 class TimeFormatDisplayOptionSerializer(
136 MathesarErrorMessageMixin,
137 OverrideRootPartialMixin,
138 serializers.Serializer
139 ):
140 format = serializers.CharField(max_length=255)
141
142
143 class DateTimeFormatDisplayOptionSerializer(
144 MathesarErrorMessageMixin,
145 OverrideRootPartialMixin,
146 serializers.Serializer
147 ):
148 time_format = serializers.CharField(max_length=255)
149 date_format = serializers.CharField(max_length=255)
150
151
152 class DurationDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):
153 min = serializers.CharField(max_length=255)
154 max = serializers.CharField(max_length=255)
155 show_units = serializers.BooleanField()
156
157
158 class DisplayOptionsMappingSerializer(
159 OverrideRootPartialMixin,
160 MathesarPolymorphicErrorMixin,
161 ReadWritePolymorphicSerializerMappingMixin,
162 serializers.Serializer
163 ):
164 serializers_mapping = {
165 MathesarTypeIdentifier.BOOLEAN.value: BooleanDisplayOptionSerializer,
166 MathesarTypeIdentifier.DATETIME.value: DateTimeFormatDisplayOptionSerializer,
167 MathesarTypeIdentifier.DATE.value: TimeFormatDisplayOptionSerializer,
168 MathesarTypeIdentifier.DURATION.value: DurationDisplayOptionSerializer,
169 MathesarTypeIdentifier.MONEY.value: MoneyDisplayOptionSerializer,
170 MathesarTypeIdentifier.NUMBER.value: NumberDisplayOptionSerializer,
171 MathesarTypeIdentifier.TIME.value: TimeFormatDisplayOptionSerializer,
172 }
173
174 def get_mapping_field(self):
175 db_type = self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY]
176 mathesar_type = get_mathesar_type_from_db_type(db_type)
177 return mathesar_type
178
[end of mathesar/api/serializers/shared_serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mathesar/api/serializers/shared_serializers.py b/mathesar/api/serializers/shared_serializers.py
--- a/mathesar/api/serializers/shared_serializers.py
+++ b/mathesar/api/serializers/shared_serializers.py
@@ -112,7 +112,7 @@
class AbstractNumberDisplayOptionSerializer(serializers.Serializer):
- number_format = serializers.ChoiceField(required=False, choices=['english', 'german', 'french', 'hindi', 'swiss'])
+ number_format = serializers.ChoiceField(required=False, allow_null=True, choices=['english', 'german', 'french', 'hindi', 'swiss'])
class NumberDisplayOptionSerializer(
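A minimal standalone sketch of what the one-line change above enables, using an illustrative serializer rather than Mathesar's actual classes: with `allow_null=True`, DRF's `ChoiceField` accepts an explicit `null` for `number_format` instead of rejecting it with error code 2024.

```python
# Sketch only (not Mathesar code): ChoiceField rejects None unless
# allow_null=True is set, which is exactly what the patch above adds.
from rest_framework import serializers

class ExampleDisplayOptions(serializers.Serializer):
    number_format = serializers.ChoiceField(
        required=False,
        allow_null=True,  # without this, {"number_format": None} fails validation
        choices=["english", "german", "french", "hindi", "swiss"],
    )

s = ExampleDisplayOptions(data={"number_format": None})
assert s.is_valid(), s.errors  # passes once allow_null=True is set
```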
|
{"golden_diff": "diff --git a/mathesar/api/serializers/shared_serializers.py b/mathesar/api/serializers/shared_serializers.py\n--- a/mathesar/api/serializers/shared_serializers.py\n+++ b/mathesar/api/serializers/shared_serializers.py\n@@ -112,7 +112,7 @@\n \n \n class AbstractNumberDisplayOptionSerializer(serializers.Serializer):\n- number_format = serializers.ChoiceField(required=False, choices=['english', 'german', 'french', 'hindi', 'swiss'])\n+ number_format = serializers.ChoiceField(required=False, allow_null=True, choices=['english', 'german', 'french', 'hindi', 'swiss'])\n \n \n class NumberDisplayOptionSerializer(\n", "issue": "Unable to set number_format to null for Money type\n## Reproduce\n\n1. Set up a Money column.\n\n1. Submit a `PATCH` request to the columns API, e.g. `/api/db/v0/tables/15/columns/52/`\n\n1. Send:\n\n ```json\n {\n \"type\": \"MATHESAR_TYPES.MATHESAR_MONEY\",\n \"display_options\": {\n \"currency_symbol\": \"$\",\n \"currency_symbol_location\": \"after-minus\",\n \"number_format\": \"english\"\n }\n }\n ```\n\n Receive success. Good.\n\n1. Now change `display_options.number_format` to `null`, sending:\n\n ```json\n {\n \"type\": \"MATHESAR_TYPES.MATHESAR_MONEY\",\n \"display_options\": {\n \"currency_symbol\": \"$\",\n \"currency_symbol_location\": \"after-minus\",\n \"number_format\": null\n }\n }\n ```\n \n Expect success.\n\n Receive:\n\n ```json\n [\n {\n \"code\": 2024,\n \"field\": \"number_format\",\n \"message\": \"This field may not be null.\",\n \"detail\": {}\n }\n ]\n ```\n\n", "before_files": [{"content": "from django.core.exceptions import ImproperlyConfigured\nfrom rest_framework import serializers\n\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.database.types import MathesarTypeIdentifier, get_mathesar_type_from_db_type\n\n\nclass ReadOnlyPolymorphicSerializerMappingMixin:\n \"\"\"\n This serializer mixin is helpful in serializing polymorphic models,\n by switching to correct serializer based on the mapping field value.\n \"\"\"\n\n def __new__(cls, *args, **kwargs):\n if cls.serializers_mapping is None:\n raise ImproperlyConfigured(\n '`{cls}` is missing a '\n '`{cls}.model_serializer_mapping` attribute'.format(cls=cls.__name__)\n )\n return super().__new__(cls, *args, **kwargs)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.serializers_cls_mapping = {}\n serializers_mapping = self.serializers_mapping\n self.serializers_mapping = {}\n for identifier, serializer_cls in serializers_mapping.items():\n if callable(serializer_cls):\n serializer = serializer_cls(*args, **kwargs)\n serializer.parent = self\n else:\n serializer = serializer_cls\n self.serializers_mapping[identifier] = serializer\n self.serializers_cls_mapping[identifier] = serializer_cls\n\n def to_representation(self, instance):\n serializer = self.serializers_mapping.get(self.get_mapping_field(), None)\n if serializer is not None:\n return serializer.to_representation(instance)\n else:\n return instance\n\n def get_mapping_field(self):\n mapping_field = getattr(self, \"mapping_field\", None)\n if mapping_field is None:\n raise Exception(\n \"Add a `mapping_field` to be used as a identifier\"\n \"or override this method to return a identifier to identify a proper serializer\"\n )\n return mapping_field\n\n\nclass ReadWritePolymorphicSerializerMappingMixin(ReadOnlyPolymorphicSerializerMappingMixin):\n def to_internal_value(self, data):\n serializer = self.serializers_mapping.get(self.get_mapping_field())\n 
if serializer is not None:\n return serializer.to_internal_value(data=data)\n else:\n data = {}\n return data\n\n\nclass MonkeyPatchPartial:\n \"\"\"\n Work around bug #3847 in djangorestframework by monkey-patching the partial\n attribute of the root serializer during the call to validate_empty_values.\n https://github.com/encode/django-rest-framework/issues/3847\n \"\"\"\n\n def __init__(self, root):\n self._root = root\n\n def __enter__(self):\n self._old = getattr(self._root, 'partial')\n setattr(self._root, 'partial', False)\n\n def __exit__(self, *args):\n setattr(self._root, 'partial', self._old)\n\n\nclass OverrideRootPartialMixin:\n \"\"\"\n This mixin is used to convert a serializer into a partial serializer,\n based on the serializer `partial` property rather than the parent's `partial` property.\n Refer to the issue\n https://github.com/encode/django-rest-framework/issues/3847\n \"\"\"\n\n def run_validation(self, *args, **kwargs):\n if not self.partial:\n with MonkeyPatchPartial(self.root):\n return super().run_validation(*args, **kwargs)\n return super().run_validation(*args, **kwargs)\n\n\nclass MathesarPolymorphicErrorMixin(MathesarErrorMessageMixin):\n def get_serializer_fields(self):\n return self.serializers_mapping[self.get_mapping_field()].fields\n\n\nclass CustomBooleanLabelSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n TRUE = serializers.CharField()\n FALSE = serializers.CharField()\n\n\nDISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY = 'db_type'\n\n\nclass BooleanDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):\n input = serializers.ChoiceField(choices=[(\"dropdown\", \"dropdown\"), (\"checkbox\", \"checkbox\")])\n custom_labels = CustomBooleanLabelSerializer(required=False)\n\n\nclass AbstractNumberDisplayOptionSerializer(serializers.Serializer):\n number_format = serializers.ChoiceField(required=False, choices=['english', 'german', 'french', 'hindi', 'swiss'])\n\n\nclass NumberDisplayOptionSerializer(\n MathesarErrorMessageMixin,\n OverrideRootPartialMixin,\n AbstractNumberDisplayOptionSerializer\n):\n show_as_percentage = serializers.BooleanField(default=False)\n\n\nclass MoneyDisplayOptionSerializer(\n MathesarErrorMessageMixin,\n OverrideRootPartialMixin,\n AbstractNumberDisplayOptionSerializer\n):\n currency_symbol = serializers.CharField()\n currency_symbol_location = serializers.ChoiceField(choices=['after-minus', 'end-with-space'])\n\n\nclass TimeFormatDisplayOptionSerializer(\n MathesarErrorMessageMixin,\n OverrideRootPartialMixin,\n serializers.Serializer\n):\n format = serializers.CharField(max_length=255)\n\n\nclass DateTimeFormatDisplayOptionSerializer(\n MathesarErrorMessageMixin,\n OverrideRootPartialMixin,\n serializers.Serializer\n):\n time_format = serializers.CharField(max_length=255)\n date_format = serializers.CharField(max_length=255)\n\n\nclass DurationDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):\n min = serializers.CharField(max_length=255)\n max = serializers.CharField(max_length=255)\n show_units = serializers.BooleanField()\n\n\nclass DisplayOptionsMappingSerializer(\n OverrideRootPartialMixin,\n MathesarPolymorphicErrorMixin,\n ReadWritePolymorphicSerializerMappingMixin,\n serializers.Serializer\n):\n serializers_mapping = {\n MathesarTypeIdentifier.BOOLEAN.value: BooleanDisplayOptionSerializer,\n MathesarTypeIdentifier.DATETIME.value: DateTimeFormatDisplayOptionSerializer,\n MathesarTypeIdentifier.DATE.value: 
TimeFormatDisplayOptionSerializer,\n MathesarTypeIdentifier.DURATION.value: DurationDisplayOptionSerializer,\n MathesarTypeIdentifier.MONEY.value: MoneyDisplayOptionSerializer,\n MathesarTypeIdentifier.NUMBER.value: NumberDisplayOptionSerializer,\n MathesarTypeIdentifier.TIME.value: TimeFormatDisplayOptionSerializer,\n }\n\n def get_mapping_field(self):\n db_type = self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY]\n mathesar_type = get_mathesar_type_from_db_type(db_type)\n return mathesar_type\n", "path": "mathesar/api/serializers/shared_serializers.py"}]}
| 2,545 | 151 |
gh_patches_debug_314
|
rasdani/github-patches
|
git_diff
|
zulip__zulip-20788
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"Pan and zoom" cuts off images instead of using the available space
If you have a tall image and a wide monitor (and wide browser viewport), and you try to zoom… the image stays trapped inside the same box it occupied before you even tried to zoom. If the image is super wide instead of tall, the same thing happens the other way around.
This leads to a lot of frustrating panning around, to look at the different parts of the image through this narrow keyhole, while tons of screen space next to it doesn't get used.
This is the biggest of the issues described by @vanclute in #18939. It was reported again by @alexanderglueck as #19837, and I just ran into it myself ([chat](https://chat.zulip.org/#narrow/stream/6-frontend/topic/pan.2Fzoom/near/1308717)). Here's a nice illustration from #19837:

Instead, when zooming we should use the full space available. This may be bigger than the area the image occupied when it was scaled down to fit completely in the space available, because the available box may have a different aspect ratio from the image.
</issue>
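To make the request concrete, here is a small illustrative calculation (not Zulip code, and with made-up example dimensions) of how much of a wide viewport a tall image actually occupies once it is scaled down to fit. That occupied box is what the pan/zoom surface is currently clipped to, while the issue asks for it to span the full viewport.

```python
# Illustrative only: the "occupied box" is the image's on-screen size after it
# is scaled down to fit entirely inside the viewport; the issue asks for the
# pan/zoom surface to use the full viewport instead of this smaller box.
def occupied_box(img_w, img_h, box_w, box_h):
    scale = min(box_w / img_w, box_h / img_h)
    return img_w * scale, img_h * scale

# Example numbers: a 1000x3000 (tall) image in a 2560x1310 viewport occupies
# only ~437x1310, leaving most of the horizontal space unused by the zoomed view.
print(occupied_box(1000, 3000, 2560, 1310))
```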
<code>
[start of version.py]
1 import os
2
3 ZULIP_VERSION = "5.0-dev+git"
4
5 # Add information on number of commits and commit hash to version, if available
6 zulip_git_version_file = os.path.join(
7 os.path.dirname(os.path.abspath(__file__)), "zulip-git-version"
8 )
9 lines = [ZULIP_VERSION, ""]
10 if os.path.exists(zulip_git_version_file):
11 with open(zulip_git_version_file) as f:
12 lines = f.readlines() + ["", ""]
13 ZULIP_VERSION = lines.pop(0).strip()
14 ZULIP_MERGE_BASE = lines.pop(0).strip()
15
16 LATEST_MAJOR_VERSION = "4.0"
17 LATEST_RELEASE_VERSION = "4.9"
18 LATEST_RELEASE_ANNOUNCEMENT = "https://blog.zulip.com/2021/05/13/zulip-4-0-released/"
19
20 # Versions of the desktop app below DESKTOP_MINIMUM_VERSION will be
21 # prevented from connecting to the Zulip server. Versions above
22 # DESKTOP_MINIMUM_VERSION but below DESKTOP_WARNING_VERSION will have
23 # a banner at the top of the page asking the user to upgrade.
24 DESKTOP_MINIMUM_VERSION = "5.2.0"
25 DESKTOP_WARNING_VERSION = "5.4.3"
26
27 # Bump the API_FEATURE_LEVEL whenever an API change is made
28 # that clients might want to condition on. If we forget at
29 # the time we make the change, then bump it later as soon
30 # as we notice; clients using API_FEATURE_LEVEL will just not
31 # use the new feature/API until the bump.
32 #
33 # Changes should be accompanied by documentation explaining what the
34 # new level means in templates/zerver/api/changelog.md, as well as
35 # "**Changes**" entries in the endpoint's documentation in `zulip.yaml`.
36 API_FEATURE_LEVEL = 115
37
38 # Bump the minor PROVISION_VERSION to indicate that folks should provision
39 # only when going from an old version of the code to a newer version. Bump
40 # the major version to indicate that folks should provision in both
41 # directions.
42
43 # Typically,
44 # * adding a dependency only requires a minor version bump;
45 # * removing a dependency requires a major version bump;
46 # * upgrading a dependency requires a major version bump, unless the
47 # upgraded dependency is backwards compatible with all of our
48 # historical commits sharing the same major version, in which case a
49 # minor version bump suffices.
50
51 PROVISION_VERSION = "173.3"
52
[end of version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/version.py b/version.py
--- a/version.py
+++ b/version.py
@@ -48,4 +48,4 @@
# historical commits sharing the same major version, in which case a
# minor version bump suffices.
-PROVISION_VERSION = "173.3"
+PROVISION_VERSION = "173.4"
|
{"golden_diff": "diff --git a/version.py b/version.py\n--- a/version.py\n+++ b/version.py\n@@ -48,4 +48,4 @@\n # historical commits sharing the same major version, in which case a\n # minor version bump suffices.\n \n-PROVISION_VERSION = \"173.3\"\n+PROVISION_VERSION = \"173.4\"\n", "issue": "\"Pan and zoom\" cuts off images instead of using the available space\nIf you have a tall image and a wide monitor (and wide browser viewport), and you try to zoom\u2026 the image stays trapped inside the same box it occupied before you even tried to zoom. If the image is super wide instead of tall, the same thing happens the other way around.\r\n\r\nThis leads to a lot of frustrating panning around, to look at the different parts of the image through this narrow keyhole, while tons of screen space next to it doesn't get used.\r\n\r\nThis is the biggest of the issues described by @vanclute in #18939. It was reported again by @alexanderglueck as #19837, and I just ran into it myself ([chat](https://chat.zulip.org/#narrow/stream/6-frontend/topic/pan.2Fzoom/near/1308717)). Here's a nice illustration from #19837: \r\n\r\n\r\nInstead, when zooming we should use the full space available. This may be bigger than the area the image occupied when it was scaled down to fit completely in the space available, because the available box may have a different aspect ratio from the image.\r\n\n", "before_files": [{"content": "import os\n\nZULIP_VERSION = \"5.0-dev+git\"\n\n# Add information on number of commits and commit hash to version, if available\nzulip_git_version_file = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"zulip-git-version\"\n)\nlines = [ZULIP_VERSION, \"\"]\nif os.path.exists(zulip_git_version_file):\n with open(zulip_git_version_file) as f:\n lines = f.readlines() + [\"\", \"\"]\nZULIP_VERSION = lines.pop(0).strip()\nZULIP_MERGE_BASE = lines.pop(0).strip()\n\nLATEST_MAJOR_VERSION = \"4.0\"\nLATEST_RELEASE_VERSION = \"4.9\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.com/2021/05/13/zulip-4-0-released/\"\n\n# Versions of the desktop app below DESKTOP_MINIMUM_VERSION will be\n# prevented from connecting to the Zulip server. Versions above\n# DESKTOP_MINIMUM_VERSION but below DESKTOP_WARNING_VERSION will have\n# a banner at the top of the page asking the user to upgrade.\nDESKTOP_MINIMUM_VERSION = \"5.2.0\"\nDESKTOP_WARNING_VERSION = \"5.4.3\"\n\n# Bump the API_FEATURE_LEVEL whenever an API change is made\n# that clients might want to condition on. If we forget at\n# the time we make the change, then bump it later as soon\n# as we notice; clients using API_FEATURE_LEVEL will just not\n# use the new feature/API until the bump.\n#\n# Changes should be accompanied by documentation explaining what the\n# new level means in templates/zerver/api/changelog.md, as well as\n# \"**Changes**\" entries in the endpoint's documentation in `zulip.yaml`.\nAPI_FEATURE_LEVEL = 115\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. 
Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically,\n# * adding a dependency only requires a minor version bump;\n# * removing a dependency requires a major version bump;\n# * upgrading a dependency requires a major version bump, unless the\n# upgraded dependency is backwards compatible with all of our\n# historical commits sharing the same major version, in which case a\n# minor version bump suffices.\n\nPROVISION_VERSION = \"173.3\"\n", "path": "version.py"}]}
| 1,497 | 80 |
gh_patches_debug_11329
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-center-index-4675
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[package] odbc/2.3.7: sysconfdir is in Conan local cache
<!--
Please don't forget to update the issue title.
Include all applicable information to help us reproduce your problem.
-->
### Package and Environment Details (include every applicable attribute)
* Package Name/Version: **odbc/2.3.7**
### Steps to reproduce (Include if Applicable)
The odbc library has a configuration option called `--sysconfdir` that specifies the default location for configuration files. By default the value is `${prefix}/etc`. The problem is that `${prefix}` ends up being a location in the Conan local cache, for example:
```
/userhome/epederson/.conan/data/odbc/2.3.7/bincrafters/stable/package/761432f3453a17c960259b5b2605153e3bcffd7d/etc
```
This location is only valid on the machine that the package was built on.
The typical value for `sysconfdir` is `/etc`.
I will provide a PR that adds `--sysconfdir=/etc` to the configuration settings.
</issue>
<code>
[start of recipes/odbc/all/conanfile.py]
1 import glob
2 import os
3 from conans import ConanFile, AutoToolsBuildEnvironment, tools
4 from conans.errors import ConanInvalidConfiguration
5
6
7 class OdbcConan(ConanFile):
8 name = "odbc"
9 description = "Package providing unixODBC"
10 topics = ("odbc", "database", "dbms", "data-access")
11 url = "https://github.com/conan-io/conan-center-index"
12 homepage = "http://www.unixodbc.org"
13 license = ("LGPL-2.1", "GPL-2.1")
14 exports_sources = "patches/**"
15 settings = "os", "compiler", "build_type", "arch"
16 options = {
17 "shared": [True, False],
18 "fPIC": [True, False],
19 "with_libiconv": [True, False]
20 }
21 default_options = {
22 "shared": False,
23 "fPIC": True,
24 "with_libiconv": True
25 }
26
27 _autotools = None
28
29 @property
30 def _source_subfolder(self):
31 return "source_subfolder"
32
33 def configure(self):
34 if self.options.shared:
35 del self.options.fPIC
36 del self.settings.compiler.libcxx
37 del self.settings.compiler.cppstd
38 if self.settings.os == "Windows":
39 raise ConanInvalidConfiguration("Windows not supported yet. Please, open an issue if you need such support")
40
41 def requirements(self):
42 if self.options.with_libiconv:
43 self.requires("libiconv/1.16")
44
45 def source(self):
46 tools.get(**self.conan_data["sources"][self.version])
47 extracted_dir = "unixODBC-%s" % self.version
48 os.rename(extracted_dir, self._source_subfolder)
49
50 def _configure_autotools(self):
51 if self._autotools:
52 return self._autotools
53 self._autotools = AutoToolsBuildEnvironment(self)
54 static_flag = "no" if self.options.shared else "yes"
55 shared_flag = "yes" if self.options.shared else "no"
56 libiconv_flag = "yes" if self.options.with_libiconv else "no"
57 args = ["--enable-static=%s" % static_flag,
58 "--enable-shared=%s" % shared_flag,
59 "--enable-ltdl-install",
60 "--enable-iconv=%s" % libiconv_flag]
61 if self.options.with_libiconv:
62 libiconv_prefix = self.deps_cpp_info["libiconv"].rootpath
63 args.append("--with-libiconv-prefix=%s" % libiconv_prefix)
64 self._autotools.configure(configure_dir=self._source_subfolder, args=args)
65 return self._autotools
66
67 def build(self):
68 for patch in self.conan_data.get("patches", {}).get(self.version, []):
69 tools.patch(**patch)
70 autotools = self._configure_autotools()
71 autotools.make()
72
73 def package(self):
74 self.copy("COPYING", src=self._source_subfolder, dst="licenses")
75 autotools = self._configure_autotools()
76 autotools.install()
77 tools.rmdir(os.path.join(self.package_folder, "share"))
78 tools.rmdir(os.path.join(self.package_folder, "etc"))
79 tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
80 for la_file in glob.glob(os.path.join(self.package_folder, "lib", "*.la")):
81 os.remove(la_file)
82
83 def package_info(self):
84 self.cpp_info.names["cmake_find_package"] = "ODBC"
85 self.cpp_info.names["cmake_find_package_multi"] = "ODBC"
86 # odbc
87 self.cpp_info.components["_odbc"].names["pkg_config"] = "odbc"
88 self.cpp_info.components["_odbc"].libs = ["odbc"]
89 self.cpp_info.components["_odbc"].requires = ["odbcltdl"]
90 if self.options.with_libiconv:
91 self.cpp_info.components["_odbc"].requires.append("libiconv::libiconv")
92 # odbcinst
93 self.cpp_info.components["odbcinst"].names["pkg_config"] = "odbcinst"
94 self.cpp_info.components["odbcinst"].libs = ["odbcinst"]
95 self.cpp_info.components["odbcinst"].requires = ["odbcltdl"]
96 # odbccr
97 self.cpp_info.components["odbccr"].names["pkg_config"] = "odbccr"
98 self.cpp_info.components["odbccr"].libs = ["odbccr"]
99
100 self.cpp_info.components["odbcltdl"].libs = ["ltdl"]
101
102 if self.settings.os == "Linux":
103 self.cpp_info.components["_odbc"].system_libs = ["pthread"]
104 self.cpp_info.components["odbcinst"].system_libs = ["pthread"]
105 self.cpp_info.components["odbcltdl"].system_libs = ["dl"]
106
107 bin_path = os.path.join(self.package_folder, "bin")
108 self.output.info("Appending PATH environment variable: {}".format(bin_path))
109 self.env_info.PATH.append(bin_path)
110
[end of recipes/odbc/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/recipes/odbc/all/conanfile.py b/recipes/odbc/all/conanfile.py
--- a/recipes/odbc/all/conanfile.py
+++ b/recipes/odbc/all/conanfile.py
@@ -57,7 +57,8 @@
args = ["--enable-static=%s" % static_flag,
"--enable-shared=%s" % shared_flag,
"--enable-ltdl-install",
- "--enable-iconv=%s" % libiconv_flag]
+ "--enable-iconv=%s" % libiconv_flag,
+ "--sysconfdir=/etc"]
if self.options.with_libiconv:
libiconv_prefix = self.deps_cpp_info["libiconv"].rootpath
args.append("--with-libiconv-prefix=%s" % libiconv_prefix)
|
{"golden_diff": "diff --git a/recipes/odbc/all/conanfile.py b/recipes/odbc/all/conanfile.py\n--- a/recipes/odbc/all/conanfile.py\n+++ b/recipes/odbc/all/conanfile.py\n@@ -57,7 +57,8 @@\n args = [\"--enable-static=%s\" % static_flag,\n \"--enable-shared=%s\" % shared_flag,\n \"--enable-ltdl-install\",\n- \"--enable-iconv=%s\" % libiconv_flag]\n+ \"--enable-iconv=%s\" % libiconv_flag,\n+ \"--sysconfdir=/etc\"]\n if self.options.with_libiconv:\n libiconv_prefix = self.deps_cpp_info[\"libiconv\"].rootpath\n args.append(\"--with-libiconv-prefix=%s\" % libiconv_prefix)\n", "issue": "[package] odbc/2.3.7: sysconfdir is in Conan local cache\n<!-- \r\n Please don't forget to update the issue title.\r\n Include all applicable information to help us reproduce your problem.\r\n-->\r\n\r\n### Package and Environment Details (include every applicable attribute)\r\n * Package Name/Version: **odbc/2.3.7**\r\n\r\n### Steps to reproduce (Include if Applicable)\r\n\r\nThe odbc library has a configuration option called `--sysconfdir` that specifies the default location for configuration files. By default the value is `${prefix}/etc`. The problem is that `${prefix}` ends up being a location in the Conan local cache, for example:\r\n```\r\n/userhome/epederson/.conan/data/odbc/2.3.7/bincrafters/stable/package/761432f3453a17c960259b5b2605153e3bcffd7d/etc\r\n```\r\nThis location is only valid on the machine that the package was built on. \r\n\r\nThe typical value for `sysconfdir` is `/etc`. \r\n\r\nI will provide a PR that adds `--sysconfdir=/etc` to the configuration settings.\n", "before_files": [{"content": "import glob\nimport os\nfrom conans import ConanFile, AutoToolsBuildEnvironment, tools\nfrom conans.errors import ConanInvalidConfiguration\n\n\nclass OdbcConan(ConanFile):\n name = \"odbc\"\n description = \"Package providing unixODBC\"\n topics = (\"odbc\", \"database\", \"dbms\", \"data-access\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"http://www.unixodbc.org\"\n license = (\"LGPL-2.1\", \"GPL-2.1\")\n exports_sources = \"patches/**\"\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"with_libiconv\": [True, False]\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"with_libiconv\": True\n }\n\n _autotools = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n if self.settings.os == \"Windows\":\n raise ConanInvalidConfiguration(\"Windows not supported yet. 
Please, open an issue if you need such support\")\n\n def requirements(self):\n if self.options.with_libiconv:\n self.requires(\"libiconv/1.16\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = \"unixODBC-%s\" % self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def _configure_autotools(self):\n if self._autotools:\n return self._autotools\n self._autotools = AutoToolsBuildEnvironment(self)\n static_flag = \"no\" if self.options.shared else \"yes\"\n shared_flag = \"yes\" if self.options.shared else \"no\"\n libiconv_flag = \"yes\" if self.options.with_libiconv else \"no\"\n args = [\"--enable-static=%s\" % static_flag,\n \"--enable-shared=%s\" % shared_flag,\n \"--enable-ltdl-install\",\n \"--enable-iconv=%s\" % libiconv_flag]\n if self.options.with_libiconv:\n libiconv_prefix = self.deps_cpp_info[\"libiconv\"].rootpath\n args.append(\"--with-libiconv-prefix=%s\" % libiconv_prefix)\n self._autotools.configure(configure_dir=self._source_subfolder, args=args)\n return self._autotools\n\n def build(self):\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n tools.patch(**patch)\n autotools = self._configure_autotools()\n autotools.make()\n\n def package(self):\n self.copy(\"COPYING\", src=self._source_subfolder, dst=\"licenses\")\n autotools = self._configure_autotools()\n autotools.install()\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n tools.rmdir(os.path.join(self.package_folder, \"etc\"))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n for la_file in glob.glob(os.path.join(self.package_folder, \"lib\", \"*.la\")):\n os.remove(la_file)\n\n def package_info(self):\n self.cpp_info.names[\"cmake_find_package\"] = \"ODBC\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"ODBC\"\n # odbc\n self.cpp_info.components[\"_odbc\"].names[\"pkg_config\"] = \"odbc\"\n self.cpp_info.components[\"_odbc\"].libs = [\"odbc\"]\n self.cpp_info.components[\"_odbc\"].requires = [\"odbcltdl\"]\n if self.options.with_libiconv:\n self.cpp_info.components[\"_odbc\"].requires.append(\"libiconv::libiconv\")\n # odbcinst\n self.cpp_info.components[\"odbcinst\"].names[\"pkg_config\"] = \"odbcinst\"\n self.cpp_info.components[\"odbcinst\"].libs = [\"odbcinst\"]\n self.cpp_info.components[\"odbcinst\"].requires = [\"odbcltdl\"]\n # odbccr\n self.cpp_info.components[\"odbccr\"].names[\"pkg_config\"] = \"odbccr\"\n self.cpp_info.components[\"odbccr\"].libs = [\"odbccr\"]\n\n self.cpp_info.components[\"odbcltdl\"].libs = [\"ltdl\"]\n\n if self.settings.os == \"Linux\":\n self.cpp_info.components[\"_odbc\"].system_libs = [\"pthread\"]\n self.cpp_info.components[\"odbcinst\"].system_libs = [\"pthread\"]\n self.cpp_info.components[\"odbcltdl\"].system_libs = [\"dl\"]\n\n bin_path = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH environment variable: {}\".format(bin_path))\n self.env_info.PATH.append(bin_path)\n", "path": "recipes/odbc/all/conanfile.py"}]}
| 2,139 | 182 |
gh_patches_debug_10406
|
rasdani/github-patches
|
git_diff
|
iterative__dvc-9925
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
unnecessary remote repository fetches
DVC has started cloning external repositories even in cases where it's not strictly required.
Anything that uses `outs_graph` now clones external repositories.
Commands like `add/plots modify/pull/push/fetch/move` are affected, and potentially others.
### Reproduce
```bash
git clone https://github.com/iterative/example-get-started.git
cd example-get-started
dvc add foo
```
### Expected
No cloning occurs.
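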
</issue>
<code>
[start of dvc/repo/graph.py]
1 from typing import TYPE_CHECKING, Any, Iterator, List, Optional, Set, TypeVar
2
3 from dvc.fs import localfs
4 from dvc.utils.fs import path_isin
5
6 if TYPE_CHECKING:
7 from networkx import DiGraph
8
9 from dvc.stage import Stage
10
11 T = TypeVar("T")
12
13
14 def check_acyclic(graph: "DiGraph") -> None:
15 import networkx as nx
16
17 from dvc.exceptions import CyclicGraphError
18
19 try:
20 edges = nx.find_cycle(graph, orientation="original")
21 except nx.NetworkXNoCycle:
22 return
23
24 stages: Set["Stage"] = set()
25 for from_node, to_node, _ in edges:
26 stages.add(from_node)
27 stages.add(to_node)
28
29 raise CyclicGraphError(list(stages))
30
31
32 def get_pipeline(pipelines, node):
33 found = [i for i in pipelines if i.has_node(node)]
34 if not found:
35 return None
36
37 assert len(found) == 1
38 return found[0]
39
40
41 def get_pipelines(graph: "DiGraph"):
42 import networkx as nx
43
44 return [graph.subgraph(c).copy() for c in nx.weakly_connected_components(graph)]
45
46
47 def get_subgraph_of_nodes(
48 graph: "DiGraph", sources: Optional[List[Any]] = None, downstream: bool = False
49 ) -> "DiGraph":
50 from networkx import dfs_postorder_nodes, reverse_view
51
52 if not sources:
53 return graph
54
55 g = reverse_view(graph) if downstream else graph
56 nodes = []
57 for source in sources:
58 nodes.extend(dfs_postorder_nodes(g, source))
59 return graph.subgraph(nodes)
60
61
62 def collect_pipeline(stage: "Stage", graph: "DiGraph") -> Iterator["Stage"]:
63 import networkx as nx
64
65 pipeline = get_pipeline(get_pipelines(graph), stage)
66 if not pipeline:
67 return iter([])
68
69 return nx.dfs_postorder_nodes(pipeline, stage)
70
71
72 def collect_inside_path(path: str, graph: "DiGraph") -> List["Stage"]:
73 import networkx as nx
74
75 stages = nx.dfs_postorder_nodes(graph)
76 return [stage for stage in stages if path_isin(stage.path, path)]
77
78
79 def build_graph(stages, outs_trie=None):
80 """Generate a graph by using the given stages on the given directory
81
82 The nodes of the graph are the stage's path relative to the root.
83
84 Edges are created when the output of one stage is used as a
85 dependency in other stage.
86
87 The direction of the edges goes from the stage to its dependency:
88
89 For example, running the following:
90
91 $ dvc run -o A "echo A > A"
92 $ dvc run -d A -o B "echo B > B"
93 $ dvc run -d B -o C "echo C > C"
94
95 Will create the following graph:
96
97 ancestors <--
98 |
99 C.dvc -> B.dvc -> A.dvc
100 | |
101 | --> descendants
102 |
103 ------- pipeline ------>
104 |
105 v
106 (weakly connected components)
107
108 Args:
109 stages (list): used to build a graph from
110
111 Raises:
112 OutputDuplicationError: two outputs with the same path
113 StagePathAsOutputError: stage inside an output directory
114 OverlappingOutputPathsError: output inside output directory
115 CyclicGraphError: resulting graph has cycles
116 """
117 import networkx as nx
118
119 from dvc.exceptions import StagePathAsOutputError
120
121 from .trie import build_outs_trie
122
123 graph = nx.DiGraph()
124
125 # Use trie to efficiently find overlapping outs and deps
126 outs_trie = outs_trie or build_outs_trie(stages)
127
128 for stage in stages:
129 out = outs_trie.shortest_prefix(localfs.path.parts(stage.path)).value
130 if out:
131 raise StagePathAsOutputError(stage, str(out))
132
133 # Building graph
134 graph.add_nodes_from(stages)
135 for stage in stages:
136 if stage.is_repo_import:
137 continue
138
139 for dep in stage.deps:
140 dep_key = dep.fs.path.parts(dep.fs_path)
141 overlapping = [n.value for n in outs_trie.prefixes(dep_key)]
142 if outs_trie.has_subtrie(dep_key):
143 overlapping.extend(outs_trie.values(prefix=dep_key))
144
145 graph.add_edges_from((stage, out.stage) for out in overlapping)
146 check_acyclic(graph)
147
148 return graph
149
150
151 # NOTE: using stage graph instead of just list of stages to make sure that it
152 # has already passed all the sanity checks like cycles/overlapping outputs and
153 # so on.
154 def build_outs_graph(graph, outs_trie):
155 import networkx as nx
156
157 outs_graph = nx.DiGraph()
158
159 outs_graph.add_nodes_from(outs_trie.values())
160 for stage in graph.nodes():
161 for dep in stage.deps:
162 if dep.fs_path is None:
163 # RepoDependency don't have a path
164 continue
165 dep_key = dep.fs.path.parts(dep.fs_path)
166 overlapping = [n.value for n in outs_trie.prefixes(dep_key)]
167 if outs_trie.has_subtrie(dep_key):
168 overlapping.extend(outs_trie.values(prefix=dep_key))
169
170 for from_out in stage.outs:
171 outs_graph.add_edges_from((from_out, out) for out in overlapping)
172 return outs_graph
173
[end of dvc/repo/graph.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/dvc/repo/graph.py b/dvc/repo/graph.py
--- a/dvc/repo/graph.py
+++ b/dvc/repo/graph.py
@@ -158,10 +158,9 @@
outs_graph.add_nodes_from(outs_trie.values())
for stage in graph.nodes():
+ if stage.is_repo_import:
+ continue
for dep in stage.deps:
- if dep.fs_path is None:
- # RepoDependency don't have a path
- continue
dep_key = dep.fs.path.parts(dep.fs_path)
overlapping = [n.value for n in outs_trie.prefixes(dep_key)]
if outs_trie.has_subtrie(dep_key):
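A condensed sketch of the patched loop (same names as in the diff): skipping import stages before touching their dependencies means a `RepoDependency` is never asked for a filesystem path, which is presumably what triggered the external clone.

```python
# Condensed illustration of build_outs_graph after the patch.
for stage in graph.nodes():
    if stage.is_repo_import:   # stages created by `dvc import`
        continue               # their deps live in another repo; skip them entirely
    for dep in stage.deps:
        dep_key = dep.fs.path.parts(dep.fs_path)  # only local deps reach here
        # ... build edges from overlapping outputs as before ...
```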
|
{"golden_diff": "diff --git a/dvc/repo/graph.py b/dvc/repo/graph.py\n--- a/dvc/repo/graph.py\n+++ b/dvc/repo/graph.py\n@@ -158,10 +158,9 @@\n \n outs_graph.add_nodes_from(outs_trie.values())\n for stage in graph.nodes():\n+ if stage.is_repo_import:\n+ continue\n for dep in stage.deps:\n- if dep.fs_path is None:\n- # RepoDependency don't have a path\n- continue\n dep_key = dep.fs.path.parts(dep.fs_path)\n overlapping = [n.value for n in outs_trie.prefixes(dep_key)]\n if outs_trie.has_subtrie(dep_key):\n", "issue": "unnecessary remote repository fetches\nDVC has started cloning external repositories even on cases where it's not strictly required. \r\nAnything that uses `outs_graph` has started cloning external repositories.\r\n\r\nCommands like `add/plots modify/pull/push/fetch/move` are affected, and maybe potentially others.\r\n\r\n### Reproduce\r\n\r\n```bash\r\ngit clone https://github.com/iterative/example-get-started.git\r\ncd example-get-started\r\ndvc add foo\r\n```\r\n\r\n### Expected\r\n\r\nNo cloning occurs.\r\n\r\n\r\n\n", "before_files": [{"content": "from typing import TYPE_CHECKING, Any, Iterator, List, Optional, Set, TypeVar\n\nfrom dvc.fs import localfs\nfrom dvc.utils.fs import path_isin\n\nif TYPE_CHECKING:\n from networkx import DiGraph\n\n from dvc.stage import Stage\n\nT = TypeVar(\"T\")\n\n\ndef check_acyclic(graph: \"DiGraph\") -> None:\n import networkx as nx\n\n from dvc.exceptions import CyclicGraphError\n\n try:\n edges = nx.find_cycle(graph, orientation=\"original\")\n except nx.NetworkXNoCycle:\n return\n\n stages: Set[\"Stage\"] = set()\n for from_node, to_node, _ in edges:\n stages.add(from_node)\n stages.add(to_node)\n\n raise CyclicGraphError(list(stages))\n\n\ndef get_pipeline(pipelines, node):\n found = [i for i in pipelines if i.has_node(node)]\n if not found:\n return None\n\n assert len(found) == 1\n return found[0]\n\n\ndef get_pipelines(graph: \"DiGraph\"):\n import networkx as nx\n\n return [graph.subgraph(c).copy() for c in nx.weakly_connected_components(graph)]\n\n\ndef get_subgraph_of_nodes(\n graph: \"DiGraph\", sources: Optional[List[Any]] = None, downstream: bool = False\n) -> \"DiGraph\":\n from networkx import dfs_postorder_nodes, reverse_view\n\n if not sources:\n return graph\n\n g = reverse_view(graph) if downstream else graph\n nodes = []\n for source in sources:\n nodes.extend(dfs_postorder_nodes(g, source))\n return graph.subgraph(nodes)\n\n\ndef collect_pipeline(stage: \"Stage\", graph: \"DiGraph\") -> Iterator[\"Stage\"]:\n import networkx as nx\n\n pipeline = get_pipeline(get_pipelines(graph), stage)\n if not pipeline:\n return iter([])\n\n return nx.dfs_postorder_nodes(pipeline, stage)\n\n\ndef collect_inside_path(path: str, graph: \"DiGraph\") -> List[\"Stage\"]:\n import networkx as nx\n\n stages = nx.dfs_postorder_nodes(graph)\n return [stage for stage in stages if path_isin(stage.path, path)]\n\n\ndef build_graph(stages, outs_trie=None):\n \"\"\"Generate a graph by using the given stages on the given directory\n\n The nodes of the graph are the stage's path relative to the root.\n\n Edges are created when the output of one stage is used as a\n dependency in other stage.\n\n The direction of the edges goes from the stage to its dependency:\n\n For example, running the following:\n\n $ dvc run -o A \"echo A > A\"\n $ dvc run -d A -o B \"echo B > B\"\n $ dvc run -d B -o C \"echo C > C\"\n\n Will create the following graph:\n\n ancestors <--\n |\n C.dvc -> B.dvc -> A.dvc\n | |\n | --> descendants\n |\n ------- pipeline ------>\n |\n 
v\n (weakly connected components)\n\n Args:\n stages (list): used to build a graph from\n\n Raises:\n OutputDuplicationError: two outputs with the same path\n StagePathAsOutputError: stage inside an output directory\n OverlappingOutputPathsError: output inside output directory\n CyclicGraphError: resulting graph has cycles\n \"\"\"\n import networkx as nx\n\n from dvc.exceptions import StagePathAsOutputError\n\n from .trie import build_outs_trie\n\n graph = nx.DiGraph()\n\n # Use trie to efficiently find overlapping outs and deps\n outs_trie = outs_trie or build_outs_trie(stages)\n\n for stage in stages:\n out = outs_trie.shortest_prefix(localfs.path.parts(stage.path)).value\n if out:\n raise StagePathAsOutputError(stage, str(out))\n\n # Building graph\n graph.add_nodes_from(stages)\n for stage in stages:\n if stage.is_repo_import:\n continue\n\n for dep in stage.deps:\n dep_key = dep.fs.path.parts(dep.fs_path)\n overlapping = [n.value for n in outs_trie.prefixes(dep_key)]\n if outs_trie.has_subtrie(dep_key):\n overlapping.extend(outs_trie.values(prefix=dep_key))\n\n graph.add_edges_from((stage, out.stage) for out in overlapping)\n check_acyclic(graph)\n\n return graph\n\n\n# NOTE: using stage graph instead of just list of stages to make sure that it\n# has already passed all the sanity checks like cycles/overlapping outputs and\n# so on.\ndef build_outs_graph(graph, outs_trie):\n import networkx as nx\n\n outs_graph = nx.DiGraph()\n\n outs_graph.add_nodes_from(outs_trie.values())\n for stage in graph.nodes():\n for dep in stage.deps:\n if dep.fs_path is None:\n # RepoDependency don't have a path\n continue\n dep_key = dep.fs.path.parts(dep.fs_path)\n overlapping = [n.value for n in outs_trie.prefixes(dep_key)]\n if outs_trie.has_subtrie(dep_key):\n overlapping.extend(outs_trie.values(prefix=dep_key))\n\n for from_out in stage.outs:\n outs_graph.add_edges_from((from_out, out) for out in overlapping)\n return outs_graph\n", "path": "dvc/repo/graph.py"}]}
| 2,250 | 155 |
gh_patches_debug_7854
|
rasdani/github-patches
|
git_diff
|
coala__coala-4969
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add docstring for coala_modes.py mode_normal
This function should have a proper docstring, and indicate that `log_printer` is unused & deprecated.
</issue>
<code>
[start of coalib/coala_modes.py]
1 def mode_normal(console_printer, log_printer, args, debug=False):
2 import functools
3
4 from coalib.coala_main import run_coala
5 from coalib.output.ConsoleInteraction import (
6 acquire_settings, nothing_done,
7 print_results, print_section_beginning)
8
9 partial_print_sec_beg = functools.partial(
10 print_section_beginning,
11 console_printer)
12 results, exitcode, _ = run_coala(
13 print_results=print_results,
14 acquire_settings=acquire_settings,
15 print_section_beginning=partial_print_sec_beg,
16 nothing_done=nothing_done,
17 console_printer=console_printer,
18 args=args,
19 debug=debug)
20
21 return exitcode
22
23
24 def mode_non_interactive(console_printer, args, debug=False):
25 import functools
26
27 from coalib.coala_main import run_coala
28 from coalib.output.ConsoleInteraction import (
29 print_results_no_input, print_section_beginning)
30
31 partial_print_sec_beg = functools.partial(
32 print_section_beginning,
33 console_printer)
34 results, exitcode, _ = run_coala(
35 print_results=print_results_no_input,
36 print_section_beginning=partial_print_sec_beg,
37 force_show_patch=True,
38 console_printer=console_printer,
39 args=args,
40 debug=debug)
41
42 return exitcode
43
44
45 def mode_json(args, debug=False):
46 import json
47
48 from coalib.coala_main import run_coala
49 from coalib.output.Logging import configure_json_logging
50 from coalib.output.JSONEncoder import create_json_encoder
51
52 if args.log_json:
53 log_stream = configure_json_logging()
54
55 JSONEncoder = create_json_encoder(use_relpath=args.relpath)
56
57 results, exitcode, _ = run_coala(args=args, debug=debug)
58
59 retval = {'results': results}
60
61 if args.log_json:
62 retval['logs'] = [json.loads(line) for line in
63 log_stream.getvalue().splitlines()]
64
65 if args.output:
66 filename = str(args.output[0])
67 with open(filename, 'w') as fp:
68 json.dump(retval, fp,
69 cls=JSONEncoder,
70 sort_keys=True,
71 indent=2,
72 separators=(',', ': '))
73 else:
74 print(json.dumps(retval,
75 cls=JSONEncoder,
76 sort_keys=True,
77 indent=2,
78 separators=(',', ': ')))
79
80 return 0 if args.show_bears else exitcode
81
82
83 def mode_format(args, debug=False):
84 from coalib.coala_main import run_coala
85 from coalib.output.ConsoleInteraction import print_results_formatted
86
87 _, exitcode, _ = run_coala(
88 print_results=print_results_formatted, args=args, debug=debug)
89 return exitcode
90
[end of coalib/coala_modes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/coalib/coala_modes.py b/coalib/coala_modes.py
--- a/coalib/coala_modes.py
+++ b/coalib/coala_modes.py
@@ -1,4 +1,13 @@
def mode_normal(console_printer, log_printer, args, debug=False):
+ """
+ This is the default coala mode. User interaction is allowed in this mode.
+
+ :param console_printer: Object to print messages on the console.
+ :param log_printer: Deprecated.
+ :param args: Alternative pre-parsed CLI arguments.
+ :param debug: Run in debug mode, bypassing multiprocessing,
+ and not catching any exceptions.
+ """
import functools
from coalib.coala_main import run_coala
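Beyond documenting the parameter as the accepted patch does, a general pattern (not part of that patch, and shown here with a hypothetical function name) is to also emit a `DeprecationWarning` when callers still pass the unused argument:

```python
# General pattern only -- not coala's accepted change.
import warnings

def run_mode(console_printer, log_printer=None, args=None, debug=False):
    """Run the default mode; ``log_printer`` is unused and deprecated."""
    if log_printer is not None:
        warnings.warn("log_printer is deprecated and ignored",
                      DeprecationWarning, stacklevel=2)
    # ... rest of the mode implementation ...
```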
|
{"golden_diff": "diff --git a/coalib/coala_modes.py b/coalib/coala_modes.py\n--- a/coalib/coala_modes.py\n+++ b/coalib/coala_modes.py\n@@ -1,4 +1,13 @@\n def mode_normal(console_printer, log_printer, args, debug=False):\n+ \"\"\"\n+ This is the default coala mode. User interaction is allowed in this mode.\n+\n+ :param console_printer: Object to print messages on the console.\n+ :param log_printer: Deprecated.\n+ :param args: Alternative pre-parsed CLI arguments.\n+ :param debug: Run in debug mode, bypassing multiprocessing,\n+ and not catching any exceptions.\n+ \"\"\"\n import functools\n \n from coalib.coala_main import run_coala\n", "issue": "Add docstring for coala_modes.py mode_normal\nThis function should have a proper docstring, and indicate that `log_printer` is unused & deprecated.\n", "before_files": [{"content": "def mode_normal(console_printer, log_printer, args, debug=False):\n import functools\n\n from coalib.coala_main import run_coala\n from coalib.output.ConsoleInteraction import (\n acquire_settings, nothing_done,\n print_results, print_section_beginning)\n\n partial_print_sec_beg = functools.partial(\n print_section_beginning,\n console_printer)\n results, exitcode, _ = run_coala(\n print_results=print_results,\n acquire_settings=acquire_settings,\n print_section_beginning=partial_print_sec_beg,\n nothing_done=nothing_done,\n console_printer=console_printer,\n args=args,\n debug=debug)\n\n return exitcode\n\n\ndef mode_non_interactive(console_printer, args, debug=False):\n import functools\n\n from coalib.coala_main import run_coala\n from coalib.output.ConsoleInteraction import (\n print_results_no_input, print_section_beginning)\n\n partial_print_sec_beg = functools.partial(\n print_section_beginning,\n console_printer)\n results, exitcode, _ = run_coala(\n print_results=print_results_no_input,\n print_section_beginning=partial_print_sec_beg,\n force_show_patch=True,\n console_printer=console_printer,\n args=args,\n debug=debug)\n\n return exitcode\n\n\ndef mode_json(args, debug=False):\n import json\n\n from coalib.coala_main import run_coala\n from coalib.output.Logging import configure_json_logging\n from coalib.output.JSONEncoder import create_json_encoder\n\n if args.log_json:\n log_stream = configure_json_logging()\n\n JSONEncoder = create_json_encoder(use_relpath=args.relpath)\n\n results, exitcode, _ = run_coala(args=args, debug=debug)\n\n retval = {'results': results}\n\n if args.log_json:\n retval['logs'] = [json.loads(line) for line in\n log_stream.getvalue().splitlines()]\n\n if args.output:\n filename = str(args.output[0])\n with open(filename, 'w') as fp:\n json.dump(retval, fp,\n cls=JSONEncoder,\n sort_keys=True,\n indent=2,\n separators=(',', ': '))\n else:\n print(json.dumps(retval,\n cls=JSONEncoder,\n sort_keys=True,\n indent=2,\n separators=(',', ': ')))\n\n return 0 if args.show_bears else exitcode\n\n\ndef mode_format(args, debug=False):\n from coalib.coala_main import run_coala\n from coalib.output.ConsoleInteraction import print_results_formatted\n\n _, exitcode, _ = run_coala(\n print_results=print_results_formatted, args=args, debug=debug)\n return exitcode\n", "path": "coalib/coala_modes.py"}]}
| 1,320 | 169 |
gh_patches_debug_22901
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-python-381
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
celery integration RecursionError
Hi there,
I upgraded sentry_sdk to 0.7.0 and started getting a RecursionError whenever there's an issue with a Celery task. The Sentry record doesn't contain any stack trace for it, but I found the error in my APM system (I can only attach a screenshot; the text data is a real mess there). I'm running Celery 4.2.1 on Ubuntu 18.
<img width="618" alt="2019-02-14 15 04 54" src="https://user-images.githubusercontent.com/7299611/52785969-89ea2180-306a-11e9-9828-56a52f3fddd6.png">
</issue>
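The accepted fix is not shown in this excerpt, so the following is an assumption rather than the actual patch: one plausible way such a `RecursionError` arises is `build_tracer` being invoked more than once for the same task, so `_wrap_task_call` keeps wrapping already-wrapped methods until the nesting blows the stack. A sentinel attribute (the name `_sentry_is_patched` is assumed here) would guard against re-patching:

```python
# Assumption / illustration only -- not necessarily the accepted fix.
def sentry_build_tracer(name, task, *args, **kwargs):
    if not getattr(task, "_sentry_is_patched", False):
        task.__call__ = _wrap_task_call(task, task.__call__)
        task.run = _wrap_task_call(task, task.run)
        task.apply_async = _wrap_apply_async(task, task.apply_async)
        task._sentry_is_patched = True  # avoid wrapping the wrappers again
    return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs))
```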
<code>
[start of sentry_sdk/integrations/celery.py]
1 from __future__ import absolute_import
2
3 import sys
4
5 from celery.exceptions import ( # type: ignore
6 SoftTimeLimitExceeded,
7 Retry,
8 Ignore,
9 Reject,
10 )
11
12 from sentry_sdk.hub import Hub
13 from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
14 from sentry_sdk.tracing import SpanContext
15 from sentry_sdk._compat import reraise
16 from sentry_sdk.integrations import Integration
17 from sentry_sdk.integrations.logging import ignore_logger
18
19
20 CELERY_CONTROL_FLOW_EXCEPTIONS = (Retry, Ignore, Reject)
21
22
23 class CeleryIntegration(Integration):
24 identifier = "celery"
25
26 def __init__(self, propagate_traces=True):
27 self.propagate_traces = propagate_traces
28
29 @staticmethod
30 def setup_once():
31 import celery.app.trace as trace # type: ignore
32
33 old_build_tracer = trace.build_tracer
34
35 def sentry_build_tracer(name, task, *args, **kwargs):
36 # Need to patch both methods because older celery sometimes
37 # short-circuits to task.run if it thinks it's safe.
38 task.__call__ = _wrap_task_call(task, task.__call__)
39 task.run = _wrap_task_call(task, task.run)
40 task.apply_async = _wrap_apply_async(task, task.apply_async)
41 return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs))
42
43 trace.build_tracer = sentry_build_tracer
44
45 _patch_worker_exit()
46
47 # This logger logs every status of every task that ran on the worker.
48 # Meaning that every task's breadcrumbs are full of stuff like "Task
49 # <foo> raised unexpected <bar>".
50 ignore_logger("celery.worker.job")
51
52
53 def _wrap_apply_async(task, f):
54 def apply_async(*args, **kwargs):
55 hub = Hub.current
56 integration = hub.get_integration(CeleryIntegration)
57 if integration is not None and integration.propagate_traces:
58 headers = None
59 for key, value in hub.iter_trace_propagation_headers():
60 if headers is None:
61 headers = dict(kwargs.get("headers") or {})
62 headers[key] = value
63 if headers is not None:
64 kwargs["headers"] = headers
65 return f(*args, **kwargs)
66
67 return apply_async
68
69
70 def _wrap_tracer(task, f):
71 # Need to wrap tracer for pushing the scope before prerun is sent, and
72 # popping it after postrun is sent.
73 #
74 # This is the reason we don't use signals for hooking in the first place.
75 # Also because in Celery 3, signal dispatch returns early if one handler
76 # crashes.
77 def _inner(*args, **kwargs):
78 hub = Hub.current
79 if hub.get_integration(CeleryIntegration) is None:
80 return f(*args, **kwargs)
81
82 with hub.push_scope() as scope:
83 scope._name = "celery"
84 scope.clear_breadcrumbs()
85 _continue_trace(args[3].get("headers") or {}, scope)
86 scope.add_event_processor(_make_event_processor(task, *args, **kwargs))
87
88 return f(*args, **kwargs)
89
90 return _inner
91
92
93 def _continue_trace(headers, scope):
94 if headers:
95 span_context = SpanContext.continue_from_headers(headers)
96 else:
97 span_context = SpanContext.start_trace()
98 scope.set_span_context(span_context)
99
100
101 def _wrap_task_call(task, f):
102 # Need to wrap task call because the exception is caught before we get to
103 # see it. Also celery's reported stacktrace is untrustworthy.
104 def _inner(*args, **kwargs):
105 try:
106 return f(*args, **kwargs)
107 except Exception:
108 exc_info = sys.exc_info()
109 with capture_internal_exceptions():
110 _capture_exception(task, exc_info)
111 reraise(*exc_info)
112
113 return _inner
114
115
116 def _make_event_processor(task, uuid, args, kwargs, request=None):
117 def event_processor(event, hint):
118 with capture_internal_exceptions():
119 event["transaction"] = task.name
120
121 with capture_internal_exceptions():
122 extra = event.setdefault("extra", {})
123 extra["celery-job"] = {
124 "task_name": task.name,
125 "args": args,
126 "kwargs": kwargs,
127 }
128
129 if "exc_info" in hint:
130 with capture_internal_exceptions():
131 if issubclass(hint["exc_info"][0], SoftTimeLimitExceeded):
132 event["fingerprint"] = [
133 "celery",
134 "SoftTimeLimitExceeded",
135 getattr(task, "name", task),
136 ]
137
138 return event
139
140 return event_processor
141
142
143 def _capture_exception(task, exc_info):
144 hub = Hub.current
145
146 if hub.get_integration(CeleryIntegration) is None:
147 return
148 if isinstance(exc_info[1], CELERY_CONTROL_FLOW_EXCEPTIONS):
149 return
150 if hasattr(task, "throws") and isinstance(exc_info[1], task.throws):
151 return
152
153 event, hint = event_from_exception(
154 exc_info,
155 client_options=hub.client.options,
156 mechanism={"type": "celery", "handled": False},
157 )
158
159 hub.capture_event(event, hint=hint)
160
161
162 def _patch_worker_exit():
163 # Need to flush queue before worker shutdown because a crashing worker will
164 # call os._exit
165 from billiard.pool import Worker # type: ignore
166
167 old_workloop = Worker.workloop
168
169 def sentry_workloop(*args, **kwargs):
170 try:
171 return old_workloop(*args, **kwargs)
172 finally:
173 with capture_internal_exceptions():
174 hub = Hub.current
175 if hub.get_integration(CeleryIntegration) is not None:
176 hub.flush()
177
178 Worker.workloop = sentry_workloop
179
[end of sentry_sdk/integrations/celery.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sentry_sdk/integrations/celery.py b/sentry_sdk/integrations/celery.py
--- a/sentry_sdk/integrations/celery.py
+++ b/sentry_sdk/integrations/celery.py
@@ -33,11 +33,18 @@
old_build_tracer = trace.build_tracer
def sentry_build_tracer(name, task, *args, **kwargs):
- # Need to patch both methods because older celery sometimes
- # short-circuits to task.run if it thinks it's safe.
- task.__call__ = _wrap_task_call(task, task.__call__)
- task.run = _wrap_task_call(task, task.run)
- task.apply_async = _wrap_apply_async(task, task.apply_async)
+ if not getattr(task, "_sentry_is_patched", False):
+ # Need to patch both methods because older celery sometimes
+ # short-circuits to task.run if it thinks it's safe.
+ task.__call__ = _wrap_task_call(task, task.__call__)
+ task.run = _wrap_task_call(task, task.run)
+ task.apply_async = _wrap_apply_async(task, task.apply_async)
+
+ # `build_tracer` is apparently called for every task
+ # invocation. Can't wrap every celery task for every invocation
+ # or we will get infinitely nested wrapper functions.
+ task._sentry_is_patched = True
+
return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs))
trace.build_tracer = sentry_build_tracer
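
The core of the fix can be sketched outside sentry-sdk as an idempotence guard: wrap once, remember that the object was patched, and skip it on later `build_tracer` calls. The class and helpers below are invented for illustration and are not the real celery/sentry-sdk API.

```python
class Task:
    def run(self):
        return "ok"

def _wrap_task_call(f):
    def _inner(*args, **kwargs):
        return f(*args, **kwargs)
    return _inner

def build_tracer(task):
    # Only patch a task object the first time it is seen.
    if not getattr(task, "_sentry_is_patched", False):
        task.run = _wrap_task_call(task.run)
        task._sentry_is_patched = True

task = Task()
for _ in range(10_000):        # repeated invocations no longer stack wrappers
    build_tracer(task)
assert task.run() == "ok"      # one wrapper layer, no RecursionError
```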
| 2,420 | 355 |
gh_patches_debug_26041
|
rasdani/github-patches
|
git_diff
|
comic__grand-challenge.org-2410
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add a time column to challenge requests table
**Is your feature request related to a problem? Please describe.**
It's not clear right away when each challenge request was submitted.
**Describe the solution you'd like**
I would like to add a column that shows the time when each challenge request was submitted. It would be handy.
**Describe alternatives you've considered**
Looking at a specific challenge request page to see the time.
**Additional context**
Idea shown in the picture below. Color coding is not necessary, but could be nice.

</issue>
<code>
[start of app/grandchallenge/challenges/admin.py]
1 from django.contrib import admin, messages
2 from django.contrib.admin import ModelAdmin
3 from django.core.exceptions import ValidationError
4
5 from grandchallenge.challenges.emails import send_challenge_status_update_email
6 from grandchallenge.challenges.models import (
7 Challenge,
8 ChallengeRequest,
9 ChallengeSeries,
10 ExternalChallenge,
11 )
12
13
14 class ChallengeAdmin(ModelAdmin):
15 readonly_fields = ("creator",)
16 autocomplete_fields = ("publications",)
17 ordering = ("-created",)
18 list_display = ("short_name", "created")
19 search_fields = ("short_name",)
20
21
22 class ExternalChallengeAdmin(ModelAdmin):
23 readonly_fields = ("creator",)
24 autocomplete_fields = ("publications",)
25
26
27 class ChallengeRequestAdmin(ModelAdmin):
28 readonly_fields = ("creator",)
29 ordering = ("-created",)
30 list_display = ("title", "short_name", "creator", "created", "status")
31 actions = ["create_challenge", "send_status_update_email"]
32
33 @admin.action(description="Create challenge for this request")
34 def create_challenge(self, request, queryset):
35 for challengerequest in queryset:
36 try:
37 challengerequest.create_challenge()
38 except ValidationError:
39 self.message_user(
40 request,
41 f"There already is a challenge with short "
42 f"name: {challengerequest.short_name}",
43 messages.WARNING,
44 )
45
46 @admin.action(description="Send status update email to requester")
47 def send_status_update_email(self, request, queryset):
48 for challengerequest in queryset:
49 if (
50 challengerequest.status
51 == challengerequest.ChallengeRequestStatusChoices.ACCEPTED
52 ):
53 try:
54 challenge = Challenge.objects.get(
55 short_name=challengerequest.short_name
56 )
57 except Challenge.DoesNotExist:
58 challenge = challengerequest.create_challenge()
59 else:
60 challenge = None
61 send_challenge_status_update_email(
62 challengerequest=challengerequest, challenge=challenge
63 )
64
65
66 admin.site.register(Challenge, ChallengeAdmin)
67 admin.site.register(ExternalChallenge, ExternalChallengeAdmin)
68 admin.site.register(ChallengeSeries)
69 admin.site.register(ChallengeRequest, ChallengeRequestAdmin)
70
[end of app/grandchallenge/challenges/admin.py]
[start of app/grandchallenge/core/templatetags/naturaldelta.py]
1 import humanize
2 from django import template
3
4 register = template.Library()
5
6
7 @register.filter
8 def naturaldelta(value):
9 return humanize.naturaldelta(value, months=False)
10
[end of app/grandchallenge/core/templatetags/naturaldelta.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/app/grandchallenge/challenges/admin.py b/app/grandchallenge/challenges/admin.py
--- a/app/grandchallenge/challenges/admin.py
+++ b/app/grandchallenge/challenges/admin.py
@@ -27,8 +27,22 @@
class ChallengeRequestAdmin(ModelAdmin):
readonly_fields = ("creator",)
ordering = ("-created",)
- list_display = ("title", "short_name", "creator", "created", "status")
+ list_display = (
+ "title",
+ "short_name",
+ "creator",
+ "created",
+ "status",
+ "challenge_type",
+ "total_cost",
+ "budget_for_hosting_challenge",
+ )
actions = ["create_challenge", "send_status_update_email"]
+ list_filter = ["status", "challenge_type"]
+
+ @admin.display(description="Total cost")
+ def total_cost(self, obj):
+ return "%s" % (obj.budget["Total"])
@admin.action(description="Create challenge for this request")
def create_challenge(self, request, queryset):
diff --git a/app/grandchallenge/core/templatetags/naturaldelta.py b/app/grandchallenge/core/templatetags/naturaldelta.py
--- a/app/grandchallenge/core/templatetags/naturaldelta.py
+++ b/app/grandchallenge/core/templatetags/naturaldelta.py
@@ -1,5 +1,6 @@
import humanize
from django import template
+from django.utils import timezone
register = template.Library()
@@ -7,3 +8,8 @@
@register.filter
def naturaldelta(value):
return humanize.naturaldelta(value, months=False)
+
+
[email protected]
+def timedifference(value):
+ return (timezone.now() - value).days
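
A rough stand-in for the new filter's behaviour, assuming it is used in the requests table roughly as `{{ challenge_request.created|timedifference }}`; `datetime` replaces `django.utils.timezone` here only so the sketch runs without a Django project.

```python
# Hypothetical template usage:  {{ challenge_request.created|timedifference }} days ago
from datetime import datetime, timedelta

def timedifference(value, now=None):
    """Whole days elapsed since `value` (mirrors the filter added above)."""
    now = now or datetime.now()
    return (now - value).days

submitted = datetime.now() - timedelta(days=12, hours=3)
print(timedifference(submitted))   # -> 12
```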
| 1,373 | 394 |
gh_patches_debug_17471
|
rasdani/github-patches
|
git_diff
|
getredash__redash-1899
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Oracle: add support for cx_Oracle v5.3.
### Issue Summary
cx_Oracle.FIXED_UNICODE: TYPE_STRING,
Variable FIXED_UNICODE was removed with Release 5.3 of cx_Oracle: https://github.com/oracle/python-cx_Oracle/blob/ae45152339f0e9b46a93d5aea74f3bc16e775060/doc/src/releasenotes.rst#version-53-march-2017
Removed deprecated types UNICODE, FIXED_UNICODE and LONG_UNICODE (use NCHAR, FIXED_NCHAR and LONG_NCHAR instead).
but is referenced in https://github.com/getredash/redash/blob/master/redash/query_runner/oracle.py#L17
### Steps to Reproduce
Our Dockerfile:
FROM redash/redash:2.0.0.b2924
USER root
RUN apt-get update
RUN apt-get -y --no-install-recommends install alien
COPY "oracle-instantclient12.2-basiclite-12.2.0.1.0-1.x86_64.rpm" "/tmp/oracle-instantclient12.2-basiclite-12.2.0.1.0-1.x86_64.rpm"
RUN alien -i "/tmp/oracle-instantclient12.2-basiclite-12.2.0.1.0-1.x86_64.rpm"
RUN echo /usr/lib/oracle/12.2/client64/lib > /etc/ld.so.conf.d/oracle-instantclient.conf && ldconfig
RUN apt-get install -y libaio1
RUN pip install cx_Oracle --pre
EXPOSE 5000
ENTRYPOINT ["/app/bin/docker-entrypoint"]
CMD ["server"]
Output:
```
AttributeError: 'module' object has no attribute 'FIXED_UNICODE'
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/gunicorn/arbiter.py", line 515, in spawn_worker
worker.init_process()
File "/usr/local/lib/python2.7/dist-packages/gunicorn/workers/base.py", line 122, in init_process
self.load_wsgi()
File "/usr/local/lib/python2.7/dist-packages/gunicorn/workers/base.py", line 130, in load_wsgi
self.wsgi = self.app.wsgi()
File "/usr/local/lib/python2.7/dist-packages/gunicorn/app/base.py", line 67, in wsgi
self.callable = self.load()
File "/usr/local/lib/python2.7/dist-packages/gunicorn/app/wsgiapp.py", line 65, in load
return self.load_wsgiapp()
File "/usr/local/lib/python2.7/dist-packages/gunicorn/app/wsgiapp.py", line 52, in load_wsgiapp
return util.import_app(self.app_uri)
File "/usr/local/lib/python2.7/dist-packages/gunicorn/util.py", line 357, in import_app
__import__(module)
File "/app/redash/__init__.py", line 68, in <module>
import_query_runners(settings.QUERY_RUNNERS)
File "/app/redash/query_runner/__init__.py", line 175, in import_query_runners
__import__(runner_import)
File "/app/redash/query_runner/oracle.py", line 17, in <module>
cx_Oracle.FIXED_UNICODE: TYPE_STRING,
    AttributeError: 'module' object has no attribute 'FIXED_UNICODE'
```
### Technical details:
* Redash Version: Docker Image redash/redash:2.0.0.b2924
* How did you install Redash: Dockerfile
</issue>
<code>
[start of redash/query_runner/oracle.py]
1 import json
2 import logging
3 import sys
4
5 from redash.query_runner import *
6 from redash.utils import JSONEncoder
7
8 try:
9 import cx_Oracle
10
11 TYPES_MAP = {
12 cx_Oracle.DATETIME: TYPE_DATETIME,
13 cx_Oracle.CLOB: TYPE_STRING,
14 cx_Oracle.LOB: TYPE_STRING,
15 cx_Oracle.FIXED_CHAR: TYPE_STRING,
16 cx_Oracle.FIXED_NCHAR: TYPE_STRING,
17 cx_Oracle.FIXED_UNICODE: TYPE_STRING,
18 cx_Oracle.INTERVAL: TYPE_DATETIME,
19 cx_Oracle.LONG_NCHAR: TYPE_STRING,
20 cx_Oracle.LONG_STRING: TYPE_STRING,
21 cx_Oracle.LONG_UNICODE: TYPE_STRING,
22 cx_Oracle.NATIVE_FLOAT: TYPE_FLOAT,
23 cx_Oracle.NCHAR: TYPE_STRING,
24 cx_Oracle.NUMBER: TYPE_FLOAT,
25 cx_Oracle.ROWID: TYPE_INTEGER,
26 cx_Oracle.STRING: TYPE_STRING,
27 cx_Oracle.TIMESTAMP: TYPE_DATETIME,
28 cx_Oracle.UNICODE: TYPE_STRING,
29 }
30
31
32 ENABLED = True
33 except ImportError:
34 ENABLED = False
35
36 logger = logging.getLogger(__name__)
37
38 class Oracle(BaseSQLQueryRunner):
39 noop_query = "SELECT 1 FROM dual"
40
41 @classmethod
42 def get_col_type(cls, col_type, scale):
43 if col_type == cx_Oracle.NUMBER:
44 return TYPE_FLOAT if scale > 0 else TYPE_INTEGER
45 else:
46 return TYPES_MAP.get(col_type, None)
47
48 @classmethod
49 def enabled(cls):
50 return ENABLED
51
52 @classmethod
53 def configuration_schema(cls):
54 return {
55 "type": "object",
56 "properties": {
57 "user": {
58 "type": "string"
59 },
60 "password": {
61 "type": "string"
62 },
63 "host": {
64 "type": "string"
65 },
66 "port": {
67 "type": "number"
68 },
69 "servicename": {
70 "type": "string",
71 "title": "DSN Service Name"
72 }
73 },
74 "required": ["servicename", "user", "password", "host", "port"],
75 "secret": ["password"]
76 }
77
78 @classmethod
79 def type(cls):
80 return "oracle"
81
82 def __init__(self, configuration):
83 super(Oracle, self).__init__(configuration)
84
85 dsn = cx_Oracle.makedsn(
86 self.configuration["host"],
87 self.configuration["port"],
88 service_name=self.configuration["servicename"])
89
90 self.connection_string = "{}/{}@{}".format(self.configuration["user"], self.configuration["password"], dsn)
91
92 def _get_tables(self, schema):
93 query = """
94 SELECT
95 all_tab_cols.OWNER,
96 all_tab_cols.TABLE_NAME,
97 all_tab_cols.COLUMN_NAME
98 FROM all_tab_cols
99 WHERE all_tab_cols.OWNER NOT IN('SYS','SYSTEM','ORDSYS','CTXSYS','WMSYS','MDSYS','ORDDATA','XDB','OUTLN','DMSYS','DSSYS','EXFSYS','LBACSYS','TSMSYS')
100 """
101
102 results, error = self.run_query(query, None)
103
104 if error is not None:
105 raise Exception("Failed getting schema.")
106
107 results = json.loads(results)
108
109 for row in results['rows']:
110 if row['OWNER'] != None:
111 table_name = '{}.{}'.format(row['OWNER'], row['TABLE_NAME'])
112 else:
113 table_name = row['TABLE_NAME']
114
115 if table_name not in schema:
116 schema[table_name] = {'name': table_name, 'columns': []}
117
118 schema[table_name]['columns'].append(row['COLUMN_NAME'])
119
120 return schema.values()
121
122 @classmethod
123 def _convert_number(cls, value):
124 try:
125 return int(value)
126 except:
127 return value
128
129 @classmethod
130 def output_handler(cls, cursor, name, default_type, length, precision, scale):
131 if default_type in (cx_Oracle.CLOB, cx_Oracle.LOB):
132 return cursor.var(cx_Oracle.LONG_STRING, 80000, cursor.arraysize)
133
134 if default_type in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR):
135 return cursor.var(unicode, length, cursor.arraysize)
136
137 if default_type == cx_Oracle.NUMBER:
138 if scale <= 0:
139 return cursor.var(cx_Oracle.STRING, 255, outconverter=Oracle._convert_number, arraysize=cursor.arraysize)
140
141 def run_query(self, query, user):
142 connection = cx_Oracle.connect(self.connection_string)
143 connection.outputtypehandler = Oracle.output_handler
144
145 cursor = connection.cursor()
146
147 try:
148 cursor.execute(query)
149
150 if cursor.description is not None:
151 columns = self.fetch_columns([(i[0], Oracle.get_col_type(i[1], i[5])) for i in cursor.description])
152 rows = [dict(zip((c['name'] for c in columns), row)) for row in cursor]
153
154 data = {'columns': columns, 'rows': rows}
155 error = None
156 json_data = json.dumps(data, cls=JSONEncoder)
157 else:
158 error = 'Query completed but it returned no data.'
159 json_data = None
160 except cx_Oracle.DatabaseError as err:
161 error = u"Query failed. {}.".format(err.message)
162 json_data = None
163 except KeyboardInterrupt:
164 connection.cancel()
165 error = "Query cancelled by user."
166 json_data = None
167 finally:
168 connection.close()
169
170 return json_data, error
171
172 register(Oracle)
173
[end of redash/query_runner/oracle.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/redash/query_runner/oracle.py b/redash/query_runner/oracle.py
--- a/redash/query_runner/oracle.py
+++ b/redash/query_runner/oracle.py
@@ -14,18 +14,14 @@
cx_Oracle.LOB: TYPE_STRING,
cx_Oracle.FIXED_CHAR: TYPE_STRING,
cx_Oracle.FIXED_NCHAR: TYPE_STRING,
- cx_Oracle.FIXED_UNICODE: TYPE_STRING,
cx_Oracle.INTERVAL: TYPE_DATETIME,
- cx_Oracle.LONG_NCHAR: TYPE_STRING,
cx_Oracle.LONG_STRING: TYPE_STRING,
- cx_Oracle.LONG_UNICODE: TYPE_STRING,
cx_Oracle.NATIVE_FLOAT: TYPE_FLOAT,
cx_Oracle.NCHAR: TYPE_STRING,
cx_Oracle.NUMBER: TYPE_FLOAT,
cx_Oracle.ROWID: TYPE_INTEGER,
cx_Oracle.STRING: TYPE_STRING,
cx_Oracle.TIMESTAMP: TYPE_DATETIME,
- cx_Oracle.UNICODE: TYPE_STRING,
}
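
As a hypothetical alternative (not what the accepted patch does), the type map could be built only from constants the installed driver actually exposes, which would tolerate both pre- and post-5.3 releases of cx_Oracle:

```python
# Sketch only: needs a working cx_Oracle install; the TYPE_* values stand in
# for the constants redash defines in redash.query_runner.
import cx_Oracle

TYPE_STRING, TYPE_DATETIME, TYPE_FLOAT, TYPE_INTEGER = "string", "datetime", "float", "integer"

_CANDIDATES = [
    ("DATETIME", TYPE_DATETIME), ("TIMESTAMP", TYPE_DATETIME), ("INTERVAL", TYPE_DATETIME),
    ("CLOB", TYPE_STRING), ("LOB", TYPE_STRING), ("STRING", TYPE_STRING),
    ("FIXED_CHAR", TYPE_STRING), ("FIXED_NCHAR", TYPE_STRING), ("NCHAR", TYPE_STRING),
    ("LONG_STRING", TYPE_STRING),
    ("UNICODE", TYPE_STRING), ("FIXED_UNICODE", TYPE_STRING),  # absent in cx_Oracle >= 5.3
    ("NATIVE_FLOAT", TYPE_FLOAT), ("NUMBER", TYPE_FLOAT), ("ROWID", TYPE_INTEGER),
]

# Keep only the constants that the installed cx_Oracle build actually defines.
TYPES_MAP = {getattr(cx_Oracle, name): mapped
             for name, mapped in _CANDIDATES
             if hasattr(cx_Oracle, name)}
```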
|
{"golden_diff": "diff --git a/redash/query_runner/oracle.py b/redash/query_runner/oracle.py\n--- a/redash/query_runner/oracle.py\n+++ b/redash/query_runner/oracle.py\n@@ -14,18 +14,14 @@\n cx_Oracle.LOB: TYPE_STRING,\n cx_Oracle.FIXED_CHAR: TYPE_STRING,\n cx_Oracle.FIXED_NCHAR: TYPE_STRING,\n- cx_Oracle.FIXED_UNICODE: TYPE_STRING,\n cx_Oracle.INTERVAL: TYPE_DATETIME,\n- cx_Oracle.LONG_NCHAR: TYPE_STRING,\n cx_Oracle.LONG_STRING: TYPE_STRING,\n- cx_Oracle.LONG_UNICODE: TYPE_STRING,\n cx_Oracle.NATIVE_FLOAT: TYPE_FLOAT,\n cx_Oracle.NCHAR: TYPE_STRING,\n cx_Oracle.NUMBER: TYPE_FLOAT,\n cx_Oracle.ROWID: TYPE_INTEGER,\n cx_Oracle.STRING: TYPE_STRING,\n cx_Oracle.TIMESTAMP: TYPE_DATETIME,\n- cx_Oracle.UNICODE: TYPE_STRING,\n }\n", "issue": "Oracle: add support for cx_Oracle v5.3.\n\r\n### Issue Summary\r\n\r\ncx_Oracle.FIXED_UNICODE: TYPE_STRING,\r\n\r\nVariable FIXED_UNICODE was removed with Release 5.3 of cx_Oracle: https://github.com/oracle/python-cx_Oracle/blob/ae45152339f0e9b46a93d5aea74f3bc16e775060/doc/src/releasenotes.rst#version-53-march-2017 \r\n\r\n Removed deprecated types UNICODE, FIXED_UNICODE and LONG_UNICODE (use NCHAR, FIXED_NCHAR and LONG_NCHAR instead).\r\n\r\nbut is referenced in https://github.com/getredash/redash/blob/master/redash/query_runner/oracle.py#L17\r\n\r\n\r\n\r\n### Steps to Reproduce\r\n\r\nOur Dockerfile:\r\n\r\n\tFROM redash/redash:2.0.0.b2924\r\n\r\n\tUSER root\r\n\r\n\tRUN apt-get update\r\n\r\n\tRUN apt-get -y --no-install-recommends install alien\r\n\tCOPY \"oracle-instantclient12.2-basiclite-12.2.0.1.0-1.x86_64.rpm\" \"/tmp/oracle-instantclient12.2-basiclite-12.2.0.1.0-1.x86_64.rpm\"\r\n\tRUN alien -i \"/tmp/oracle-instantclient12.2-basiclite-12.2.0.1.0-1.x86_64.rpm\"\r\n\tRUN echo /usr/lib/oracle/12.2/client64/lib > /etc/ld.so.conf.d/oracle-instantclient.conf && ldconfig\r\n\tRUN apt-get install -y libaio1\r\n\tRUN pip install cx_Oracle --pre\r\n\t\r\n\tEXPOSE 5000\r\n\t\r\n\tENTRYPOINT [\"/app/bin/docker-entrypoint\"]\r\n\t\r\n\tCMD [\"server\"]\r\n\r\n\r\nOutput: \r\n```\r\n AttributeError: 'module' object has no attribute 'FIXED_UNICODE'\r\n Traceback (most recent call last):\r\n File \"/usr/local/lib/python2.7/dist-packages/gunicorn/arbiter.py\", line 515, in spawn_worker\r\n worker.init_process()\r\n File \"/usr/local/lib/python2.7/dist-packages/gunicorn/workers/base.py\", line 122, in init_process\r\n self.load_wsgi()\r\n File \"/usr/local/lib/python2.7/dist-packages/gunicorn/workers/base.py\", line 130, in load_wsgi\r\n self.wsgi = self.app.wsgi()\r\n File \"/usr/local/lib/python2.7/dist-packages/gunicorn/app/base.py\", line 67, in wsgi\r\n self.callable = self.load()\r\n File \"/usr/local/lib/python2.7/dist-packages/gunicorn/app/wsgiapp.py\", line 65, in load\r\n return self.load_wsgiapp()\r\n File \"/usr/local/lib/python2.7/dist-packages/gunicorn/app/wsgiapp.py\", line 52, in load_wsgiapp\r\n return util.import_app(self.app_uri)\r\n File \"/usr/local/lib/python2.7/dist-packages/gunicorn/util.py\", line 357, in import_app\r\n __import__(module)\r\n File \"/app/redash/__init__.py\", line 68, in <module>\r\n import_query_runners(settings.QUERY_RUNNERS)\r\n File \"/app/redash/query_runner/__init__.py\", line 175, in import_query_runners\r\n __import__(runner_import)\r\n File \"/app/redash/query_runner/oracle.py\", line 17, in <module>\r\n cx_Oracle.FIXED_UNICODE: TYPE_STRING,\r\n AttributeError: 'module' object has no attribute 'FIXED_UNICODE\r\n```'\r\n\r\n\r\n### Technical details:\r\n\r\n* Redash Version: Docker Image 
redash/redash:2.0.0.b2924\r\n* How did you install Redash: Dockerfile\r\n\nOracle: add support for cx_Oracle v5.3.\n\r\n### Issue Summary\r\n\r\ncx_Oracle.FIXED_UNICODE: TYPE_STRING,\r\n\r\nVariable FIXED_UNICODE was removed with Release 5.3 of cx_Oracle: https://github.com/oracle/python-cx_Oracle/blob/ae45152339f0e9b46a93d5aea74f3bc16e775060/doc/src/releasenotes.rst#version-53-march-2017 \r\n\r\n Removed deprecated types UNICODE, FIXED_UNICODE and LONG_UNICODE (use NCHAR, FIXED_NCHAR and LONG_NCHAR instead).\r\n\r\nbut is referenced in https://github.com/getredash/redash/blob/master/redash/query_runner/oracle.py#L17\r\n\r\n\r\n\r\n### Steps to Reproduce\r\n\r\nOur Dockerfile:\r\n\r\n\tFROM redash/redash:2.0.0.b2924\r\n\r\n\tUSER root\r\n\r\n\tRUN apt-get update\r\n\r\n\tRUN apt-get -y --no-install-recommends install alien\r\n\tCOPY \"oracle-instantclient12.2-basiclite-12.2.0.1.0-1.x86_64.rpm\" \"/tmp/oracle-instantclient12.2-basiclite-12.2.0.1.0-1.x86_64.rpm\"\r\n\tRUN alien -i \"/tmp/oracle-instantclient12.2-basiclite-12.2.0.1.0-1.x86_64.rpm\"\r\n\tRUN echo /usr/lib/oracle/12.2/client64/lib > /etc/ld.so.conf.d/oracle-instantclient.conf && ldconfig\r\n\tRUN apt-get install -y libaio1\r\n\tRUN pip install cx_Oracle --pre\r\n\t\r\n\tEXPOSE 5000\r\n\t\r\n\tENTRYPOINT [\"/app/bin/docker-entrypoint\"]\r\n\t\r\n\tCMD [\"server\"]\r\n\r\n\r\nOutput: \r\n```\r\n AttributeError: 'module' object has no attribute 'FIXED_UNICODE'\r\n Traceback (most recent call last):\r\n File \"/usr/local/lib/python2.7/dist-packages/gunicorn/arbiter.py\", line 515, in spawn_worker\r\n worker.init_process()\r\n File \"/usr/local/lib/python2.7/dist-packages/gunicorn/workers/base.py\", line 122, in init_process\r\n self.load_wsgi()\r\n File \"/usr/local/lib/python2.7/dist-packages/gunicorn/workers/base.py\", line 130, in load_wsgi\r\n self.wsgi = self.app.wsgi()\r\n File \"/usr/local/lib/python2.7/dist-packages/gunicorn/app/base.py\", line 67, in wsgi\r\n self.callable = self.load()\r\n File \"/usr/local/lib/python2.7/dist-packages/gunicorn/app/wsgiapp.py\", line 65, in load\r\n return self.load_wsgiapp()\r\n File \"/usr/local/lib/python2.7/dist-packages/gunicorn/app/wsgiapp.py\", line 52, in load_wsgiapp\r\n return util.import_app(self.app_uri)\r\n File \"/usr/local/lib/python2.7/dist-packages/gunicorn/util.py\", line 357, in import_app\r\n __import__(module)\r\n File \"/app/redash/__init__.py\", line 68, in <module>\r\n import_query_runners(settings.QUERY_RUNNERS)\r\n File \"/app/redash/query_runner/__init__.py\", line 175, in import_query_runners\r\n __import__(runner_import)\r\n File \"/app/redash/query_runner/oracle.py\", line 17, in <module>\r\n cx_Oracle.FIXED_UNICODE: TYPE_STRING,\r\n AttributeError: 'module' object has no attribute 'FIXED_UNICODE\r\n```'\r\n\r\n\r\n### Technical details:\r\n\r\n* Redash Version: Docker Image redash/redash:2.0.0.b2924\r\n* How did you install Redash: Dockerfile\r\n\n", "before_files": [{"content": "import json\nimport logging\nimport sys\n\nfrom redash.query_runner import *\nfrom redash.utils import JSONEncoder\n\ntry:\n import cx_Oracle\n\n TYPES_MAP = {\n cx_Oracle.DATETIME: TYPE_DATETIME,\n cx_Oracle.CLOB: TYPE_STRING,\n cx_Oracle.LOB: TYPE_STRING,\n cx_Oracle.FIXED_CHAR: TYPE_STRING,\n cx_Oracle.FIXED_NCHAR: TYPE_STRING,\n cx_Oracle.FIXED_UNICODE: TYPE_STRING,\n cx_Oracle.INTERVAL: TYPE_DATETIME,\n cx_Oracle.LONG_NCHAR: TYPE_STRING,\n cx_Oracle.LONG_STRING: TYPE_STRING,\n cx_Oracle.LONG_UNICODE: TYPE_STRING,\n cx_Oracle.NATIVE_FLOAT: TYPE_FLOAT,\n cx_Oracle.NCHAR: 
TYPE_STRING,\n cx_Oracle.NUMBER: TYPE_FLOAT,\n cx_Oracle.ROWID: TYPE_INTEGER,\n cx_Oracle.STRING: TYPE_STRING,\n cx_Oracle.TIMESTAMP: TYPE_DATETIME,\n cx_Oracle.UNICODE: TYPE_STRING,\n }\n\n\n ENABLED = True\nexcept ImportError:\n ENABLED = False\n\nlogger = logging.getLogger(__name__)\n\nclass Oracle(BaseSQLQueryRunner):\n noop_query = \"SELECT 1 FROM dual\"\n\n @classmethod\n def get_col_type(cls, col_type, scale):\n if col_type == cx_Oracle.NUMBER:\n return TYPE_FLOAT if scale > 0 else TYPE_INTEGER\n else:\n return TYPES_MAP.get(col_type, None)\n\n @classmethod\n def enabled(cls):\n return ENABLED\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\n \"user\": {\n \"type\": \"string\"\n },\n \"password\": {\n \"type\": \"string\"\n },\n \"host\": {\n \"type\": \"string\"\n },\n \"port\": {\n \"type\": \"number\"\n },\n \"servicename\": {\n \"type\": \"string\",\n \"title\": \"DSN Service Name\"\n }\n },\n \"required\": [\"servicename\", \"user\", \"password\", \"host\", \"port\"],\n \"secret\": [\"password\"]\n }\n\n @classmethod\n def type(cls):\n return \"oracle\"\n\n def __init__(self, configuration):\n super(Oracle, self).__init__(configuration)\n\n dsn = cx_Oracle.makedsn(\n self.configuration[\"host\"],\n self.configuration[\"port\"],\n service_name=self.configuration[\"servicename\"])\n\n self.connection_string = \"{}/{}@{}\".format(self.configuration[\"user\"], self.configuration[\"password\"], dsn)\n\n def _get_tables(self, schema):\n query = \"\"\"\n SELECT\n all_tab_cols.OWNER,\n all_tab_cols.TABLE_NAME,\n all_tab_cols.COLUMN_NAME\n FROM all_tab_cols\n WHERE all_tab_cols.OWNER NOT IN('SYS','SYSTEM','ORDSYS','CTXSYS','WMSYS','MDSYS','ORDDATA','XDB','OUTLN','DMSYS','DSSYS','EXFSYS','LBACSYS','TSMSYS')\n \"\"\"\n\n results, error = self.run_query(query, None)\n\n if error is not None:\n raise Exception(\"Failed getting schema.\")\n\n results = json.loads(results)\n\n for row in results['rows']:\n if row['OWNER'] != None:\n table_name = '{}.{}'.format(row['OWNER'], row['TABLE_NAME'])\n else:\n table_name = row['TABLE_NAME']\n\n if table_name not in schema:\n schema[table_name] = {'name': table_name, 'columns': []}\n\n schema[table_name]['columns'].append(row['COLUMN_NAME'])\n\n return schema.values()\n\n @classmethod\n def _convert_number(cls, value):\n try:\n return int(value)\n except:\n return value\n\n @classmethod\n def output_handler(cls, cursor, name, default_type, length, precision, scale):\n if default_type in (cx_Oracle.CLOB, cx_Oracle.LOB):\n return cursor.var(cx_Oracle.LONG_STRING, 80000, cursor.arraysize)\n\n if default_type in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR):\n return cursor.var(unicode, length, cursor.arraysize)\n\n if default_type == cx_Oracle.NUMBER:\n if scale <= 0:\n return cursor.var(cx_Oracle.STRING, 255, outconverter=Oracle._convert_number, arraysize=cursor.arraysize)\n\n def run_query(self, query, user):\n connection = cx_Oracle.connect(self.connection_string)\n connection.outputtypehandler = Oracle.output_handler\n\n cursor = connection.cursor()\n\n try:\n cursor.execute(query)\n\n if cursor.description is not None:\n columns = self.fetch_columns([(i[0], Oracle.get_col_type(i[1], i[5])) for i in cursor.description])\n rows = [dict(zip((c['name'] for c in columns), row)) for row in cursor]\n\n data = {'columns': columns, 'rows': rows}\n error = None\n json_data = json.dumps(data, cls=JSONEncoder)\n else:\n error = 'Query completed but it returned no data.'\n json_data = None\n except 
cx_Oracle.DatabaseError as err:\n error = u\"Query failed. {}.\".format(err.message)\n json_data = None\n except KeyboardInterrupt:\n connection.cancel()\n error = \"Query cancelled by user.\"\n json_data = None\n finally:\n connection.close()\n\n return json_data, error\n\nregister(Oracle)\n", "path": "redash/query_runner/oracle.py"}]}
| 3,870 | 224 |
gh_patches_debug_23029
|
rasdani/github-patches
|
git_diff
|
pyqtgraph__pyqtgraph-1090
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Propose removal of `GraphicsScene.ExportDialog.ExporterParameters`
I propose to remove `ExportDialog.exporterParameters`. They are used to remember the export parameters of an exporter when the user switches between exporters (e.g. SVGExporter and ImageExporter).
https://github.com/pyqtgraph/pyqtgraph/blob/db483f8ec22a0d53bd0e71fac69b0045a96c6637/pyqtgraph/GraphicsScene/exportDialog.py#L127-L136
### Reasons
1. I doubt that anyone actually needs this.
2. I would like to create a PR that automatically sets the correct resolution in the ImageExporter and also keeps the correct aspect ratio when the user changes width or height in the ImageExporter (This feature was intended but is currently broken). The problem is that a simple fix would invalidate the rationale behind `ExportDialog.exporterParameters`, because changing the scene item would also update the ImageExporter parameters (and the stored parameters are not used). An elaborate fix would add a lot of clutter code (creating a dictionary of dictionaries for each item and each exporter).
The simple fix is highlighted here:
https://github.com/paulmueller/pyqtgraph/blob/b81f6d66d2b3cf4d334aa31b75b71e68cc7a3796/pyqtgraph/GraphicsScene/exportDialog.py#L101-L111
### Related Bug
As mentioned in my second point, the ImageExporter currently only displays the height and width of the scene and does not adjust when e.g. a PlotItem is selected (which has a different height, width, and aspect ratio). Furthermore, width and height are not updated to match the PlotItem aspect ratio if I change one of width or height.
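
The intended behaviour is easy to state in isolation: whichever field the user edits, the other one should be recomputed from the selected item's aspect ratio. A minimal sketch of that rule, with illustrative names rather than the actual ImageExporter parameter code:

```python
def sync_export_size(width, height, aspect, edited):
    """Return (width, height) after the user edited one field.

    `aspect` is the selected item's width/height ratio; `edited` is
    "width" or "height".
    """
    if edited == "width":
        return width, max(1, round(width / aspect))
    return max(1, round(height * aspect)), height

# A 16:9 PlotItem: editing one dimension pulls the other along with it.
print(sync_export_size(1920, 0, aspect=16 / 9, edited="width"))    # (1920, 1080)
print(sync_export_size(0, 540, aspect=16 / 9, edited="height"))    # (960, 540)
```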
### Question
Would you accept a PR that gets rid of `ExportDialog.exporterParameters` and fixes the related bug?
</issue>
<code>
[start of pyqtgraph/GraphicsScene/exportDialog.py]
1 from ..Qt import QtCore, QtGui, QT_LIB
2 from .. import exporters as exporters
3 from .. import functions as fn
4 from ..graphicsItems.ViewBox import ViewBox
5 from ..graphicsItems.PlotItem import PlotItem
6
7 if QT_LIB == 'PySide':
8 from . import exportDialogTemplate_pyside as exportDialogTemplate
9 elif QT_LIB == 'PySide2':
10 from . import exportDialogTemplate_pyside2 as exportDialogTemplate
11 elif QT_LIB == 'PyQt5':
12 from . import exportDialogTemplate_pyqt5 as exportDialogTemplate
13 else:
14 from . import exportDialogTemplate_pyqt as exportDialogTemplate
15
16
17 class ExportDialog(QtGui.QWidget):
18 def __init__(self, scene):
19 QtGui.QWidget.__init__(self)
20 self.setVisible(False)
21 self.setWindowTitle("Export")
22 self.shown = False
23 self.currentExporter = None
24 self.scene = scene
25
26 self.exporterParameters = {}
27
28 self.selectBox = QtGui.QGraphicsRectItem()
29 self.selectBox.setPen(fn.mkPen('y', width=3, style=QtCore.Qt.DashLine))
30 self.selectBox.hide()
31 self.scene.addItem(self.selectBox)
32
33 self.ui = exportDialogTemplate.Ui_Form()
34 self.ui.setupUi(self)
35
36 self.ui.closeBtn.clicked.connect(self.close)
37 self.ui.exportBtn.clicked.connect(self.exportClicked)
38 self.ui.copyBtn.clicked.connect(self.copyClicked)
39 self.ui.itemTree.currentItemChanged.connect(self.exportItemChanged)
40 self.ui.formatList.currentItemChanged.connect(self.exportFormatChanged)
41
42
43 def show(self, item=None):
44 if item is not None:
45 ## Select next exportable parent of the item originally clicked on
46 while not isinstance(item, ViewBox) and not isinstance(item, PlotItem) and item is not None:
47 item = item.parentItem()
48 ## if this is a ViewBox inside a PlotItem, select the parent instead.
49 if isinstance(item, ViewBox) and isinstance(item.parentItem(), PlotItem):
50 item = item.parentItem()
51 self.updateItemList(select=item)
52 self.setVisible(True)
53 self.activateWindow()
54 self.raise_()
55 self.selectBox.setVisible(True)
56
57 if not self.shown:
58 self.shown = True
59 vcenter = self.scene.getViewWidget().geometry().center()
60 self.setGeometry(vcenter.x()-self.width()/2, vcenter.y()-self.height()/2, self.width(), self.height())
61
62 def updateItemList(self, select=None):
63 self.ui.itemTree.clear()
64 si = QtGui.QTreeWidgetItem(["Entire Scene"])
65 si.gitem = self.scene
66 self.ui.itemTree.addTopLevelItem(si)
67 self.ui.itemTree.setCurrentItem(si)
68 si.setExpanded(True)
69 for child in self.scene.items():
70 if child.parentItem() is None:
71 self.updateItemTree(child, si, select=select)
72
73 def updateItemTree(self, item, treeItem, select=None):
74 si = None
75 if isinstance(item, ViewBox):
76 si = QtGui.QTreeWidgetItem(['ViewBox'])
77 elif isinstance(item, PlotItem):
78 si = QtGui.QTreeWidgetItem(['Plot'])
79
80 if si is not None:
81 si.gitem = item
82 treeItem.addChild(si)
83 treeItem = si
84 if si.gitem is select:
85 self.ui.itemTree.setCurrentItem(si)
86
87 for ch in item.childItems():
88 self.updateItemTree(ch, treeItem, select=select)
89
90
91 def exportItemChanged(self, item, prev):
92 if item is None:
93 return
94 if item.gitem is self.scene:
95 newBounds = self.scene.views()[0].viewRect()
96 else:
97 newBounds = item.gitem.sceneBoundingRect()
98 self.selectBox.setRect(newBounds)
99 self.selectBox.show()
100 self.updateFormatList()
101
102 def updateFormatList(self):
103 current = self.ui.formatList.currentItem()
104 if current is not None:
105 current = str(current.text())
106 self.ui.formatList.clear()
107 self.exporterClasses = {}
108 gotCurrent = False
109 for exp in exporters.listExporters():
110 self.ui.formatList.addItem(exp.Name)
111 self.exporterClasses[exp.Name] = exp
112 if exp.Name == current:
113 self.ui.formatList.setCurrentRow(self.ui.formatList.count()-1)
114 gotCurrent = True
115
116 if not gotCurrent:
117 self.ui.formatList.setCurrentRow(0)
118
119 def exportFormatChanged(self, item, prev):
120 if item is None:
121 self.currentExporter = None
122 self.ui.paramTree.clear()
123 return
124 expClass = self.exporterClasses[str(item.text())]
125 exp = expClass(item=self.ui.itemTree.currentItem().gitem)
126
127 if prev:
128 oldtext = str(prev.text())
129 self.exporterParameters[oldtext] = self.currentExporter.parameters()
130 newtext = str(item.text())
131 if newtext in self.exporterParameters.keys():
132 params = self.exporterParameters[newtext]
133 exp.params = params
134 else:
135 params = exp.parameters()
136 self.exporterParameters[newtext] = params
137
138 if params is None:
139 self.ui.paramTree.clear()
140 else:
141 self.ui.paramTree.setParameters(params)
142 self.currentExporter = exp
143 self.ui.copyBtn.setEnabled(exp.allowCopy)
144
145 def exportClicked(self):
146 self.selectBox.hide()
147 self.currentExporter.export()
148
149 def copyClicked(self):
150 self.selectBox.hide()
151 self.currentExporter.export(copy=True)
152
153 def close(self):
154 self.selectBox.setVisible(False)
155 self.setVisible(False)
156
157 def closeEvent(self, event):
158 self.close()
159 QtGui.QWidget.closeEvent(self, event)
160
[end of pyqtgraph/GraphicsScene/exportDialog.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pyqtgraph/GraphicsScene/exportDialog.py b/pyqtgraph/GraphicsScene/exportDialog.py
--- a/pyqtgraph/GraphicsScene/exportDialog.py
+++ b/pyqtgraph/GraphicsScene/exportDialog.py
@@ -22,8 +22,6 @@
self.shown = False
self.currentExporter = None
self.scene = scene
-
- self.exporterParameters = {}
self.selectBox = QtGui.QGraphicsRectItem()
self.selectBox.setPen(fn.mkPen('y', width=3, style=QtCore.Qt.DashLine))
@@ -124,16 +122,7 @@
expClass = self.exporterClasses[str(item.text())]
exp = expClass(item=self.ui.itemTree.currentItem().gitem)
- if prev:
- oldtext = str(prev.text())
- self.exporterParameters[oldtext] = self.currentExporter.parameters()
- newtext = str(item.text())
- if newtext in self.exporterParameters.keys():
- params = self.exporterParameters[newtext]
- exp.params = params
- else:
- params = exp.parameters()
- self.exporterParameters[newtext] = params
+ params = exp.parameters()
if params is None:
self.ui.paramTree.clear()
|
{"golden_diff": "diff --git a/pyqtgraph/GraphicsScene/exportDialog.py b/pyqtgraph/GraphicsScene/exportDialog.py\n--- a/pyqtgraph/GraphicsScene/exportDialog.py\n+++ b/pyqtgraph/GraphicsScene/exportDialog.py\n@@ -22,8 +22,6 @@\n self.shown = False\n self.currentExporter = None\n self.scene = scene\n- \n- self.exporterParameters = {}\n \n self.selectBox = QtGui.QGraphicsRectItem()\n self.selectBox.setPen(fn.mkPen('y', width=3, style=QtCore.Qt.DashLine))\n@@ -124,16 +122,7 @@\n expClass = self.exporterClasses[str(item.text())]\n exp = expClass(item=self.ui.itemTree.currentItem().gitem)\n \n- if prev:\n- oldtext = str(prev.text())\n- self.exporterParameters[oldtext] = self.currentExporter.parameters()\n- newtext = str(item.text())\n- if newtext in self.exporterParameters.keys():\n- params = self.exporterParameters[newtext]\n- exp.params = params\n- else:\n- params = exp.parameters()\n- self.exporterParameters[newtext] = params\n+ params = exp.parameters()\n \n if params is None:\n self.ui.paramTree.clear()\n", "issue": "Propose removal of `GraphicsScene.ExportDialog.ExporterParameters`\nI propose to remove `ExportDialog.exporterParameters`. They are used to remember the export parameters of an exporter when the user switches between exporters (e.g. SVGExporter and ImageExporter).\r\n\r\nhttps://github.com/pyqtgraph/pyqtgraph/blob/db483f8ec22a0d53bd0e71fac69b0045a96c6637/pyqtgraph/GraphicsScene/exportDialog.py#L127-L136\r\n\r\n### Reasons\r\n\r\n1. I doubt that anyone actually needs this.\r\n\r\n2. I would like to create a PR that automatically sets the correct resolution in the ImageExporter and also keeps the correct aspect ratio when the user changes width or height in the ImageExporter (This feature was intended but is currently broken). The problem is that a simple fix would invalidate the rationale behind `ExportDialog.exporterParameters`, because changing the scene item would also update the ImageExporter parameters (and the stored parameters are not used). An elaborate fix would add a lot of clutter code (creating a dictionary of dictionaries for each item and each exporter).\r\nThe simple fix is highlighted here:\r\nhttps://github.com/paulmueller/pyqtgraph/blob/b81f6d66d2b3cf4d334aa31b75b71e68cc7a3796/pyqtgraph/GraphicsScene/exportDialog.py#L101-L111\r\n\r\n### Related Bug\r\n\r\nAs mentioned in my second point, the ImageExporter currently only displays the height and width of the scene and it does not adjust when e.g. a PlotItem is selected (which has different height, widht, aspect ratio). Furthermore, width and height are not updated to match the PlotItem aspect ratio if I change one of width or height.\r\n\r\n### Question\r\n\r\nWould you accept a PR that gets rid of `ExportDialog.exporterParameters` and fixes the related bug?\nPropose removal of `GraphicsScene.ExportDialog.ExporterParameters`\nI propose to remove `ExportDialog.exporterParameters`. They are used to remember the export parameters of an exporter when the user switches between exporters (e.g. SVGExporter and ImageExporter).\r\n\r\nhttps://github.com/pyqtgraph/pyqtgraph/blob/db483f8ec22a0d53bd0e71fac69b0045a96c6637/pyqtgraph/GraphicsScene/exportDialog.py#L127-L136\r\n\r\n### Reasons\r\n\r\n1. I doubt that anyone actually needs this.\r\n\r\n2. I would like to create a PR that automatically sets the correct resolution in the ImageExporter and also keeps the correct aspect ratio when the user changes width or height in the ImageExporter (This feature was intended but is currently broken). 
The problem is that a simple fix would invalidate the rationale behind `ExportDialog.exporterParameters`, because changing the scene item would also update the ImageExporter parameters (and the stored parameters are not used). An elaborate fix would add a lot of clutter code (creating a dictionary of dictionaries for each item and each exporter).\r\nThe simple fix is highlighted here:\r\nhttps://github.com/paulmueller/pyqtgraph/blob/b81f6d66d2b3cf4d334aa31b75b71e68cc7a3796/pyqtgraph/GraphicsScene/exportDialog.py#L101-L111\r\n\r\n### Related Bug\r\n\r\nAs mentioned in my second point, the ImageExporter currently only displays the height and width of the scene and it does not adjust when e.g. a PlotItem is selected (which has different height, widht, aspect ratio). Furthermore, width and height are not updated to match the PlotItem aspect ratio if I change one of width or height.\r\n\r\n### Question\r\n\r\nWould you accept a PR that gets rid of `ExportDialog.exporterParameters` and fixes the related bug?\n", "before_files": [{"content": "from ..Qt import QtCore, QtGui, QT_LIB\nfrom .. import exporters as exporters\nfrom .. import functions as fn\nfrom ..graphicsItems.ViewBox import ViewBox\nfrom ..graphicsItems.PlotItem import PlotItem\n\nif QT_LIB == 'PySide':\n from . import exportDialogTemplate_pyside as exportDialogTemplate\nelif QT_LIB == 'PySide2':\n from . import exportDialogTemplate_pyside2 as exportDialogTemplate\nelif QT_LIB == 'PyQt5':\n from . import exportDialogTemplate_pyqt5 as exportDialogTemplate\nelse:\n from . import exportDialogTemplate_pyqt as exportDialogTemplate\n\n\nclass ExportDialog(QtGui.QWidget):\n def __init__(self, scene):\n QtGui.QWidget.__init__(self)\n self.setVisible(False)\n self.setWindowTitle(\"Export\")\n self.shown = False\n self.currentExporter = None\n self.scene = scene\n \n self.exporterParameters = {}\n\n self.selectBox = QtGui.QGraphicsRectItem()\n self.selectBox.setPen(fn.mkPen('y', width=3, style=QtCore.Qt.DashLine))\n self.selectBox.hide()\n self.scene.addItem(self.selectBox)\n \n self.ui = exportDialogTemplate.Ui_Form()\n self.ui.setupUi(self)\n \n self.ui.closeBtn.clicked.connect(self.close)\n self.ui.exportBtn.clicked.connect(self.exportClicked)\n self.ui.copyBtn.clicked.connect(self.copyClicked)\n self.ui.itemTree.currentItemChanged.connect(self.exportItemChanged)\n self.ui.formatList.currentItemChanged.connect(self.exportFormatChanged)\n \n\n def show(self, item=None):\n if item is not None:\n ## Select next exportable parent of the item originally clicked on\n while not isinstance(item, ViewBox) and not isinstance(item, PlotItem) and item is not None:\n item = item.parentItem()\n ## if this is a ViewBox inside a PlotItem, select the parent instead.\n if isinstance(item, ViewBox) and isinstance(item.parentItem(), PlotItem):\n item = item.parentItem()\n self.updateItemList(select=item)\n self.setVisible(True)\n self.activateWindow()\n self.raise_()\n self.selectBox.setVisible(True)\n \n if not self.shown:\n self.shown = True\n vcenter = self.scene.getViewWidget().geometry().center()\n self.setGeometry(vcenter.x()-self.width()/2, vcenter.y()-self.height()/2, self.width(), self.height())\n \n def updateItemList(self, select=None):\n self.ui.itemTree.clear()\n si = QtGui.QTreeWidgetItem([\"Entire Scene\"])\n si.gitem = self.scene\n self.ui.itemTree.addTopLevelItem(si)\n self.ui.itemTree.setCurrentItem(si)\n si.setExpanded(True)\n for child in self.scene.items():\n if child.parentItem() is None:\n self.updateItemTree(child, si, select=select)\n 
\n def updateItemTree(self, item, treeItem, select=None):\n si = None\n if isinstance(item, ViewBox):\n si = QtGui.QTreeWidgetItem(['ViewBox'])\n elif isinstance(item, PlotItem):\n si = QtGui.QTreeWidgetItem(['Plot'])\n \n if si is not None:\n si.gitem = item\n treeItem.addChild(si)\n treeItem = si\n if si.gitem is select:\n self.ui.itemTree.setCurrentItem(si)\n \n for ch in item.childItems():\n self.updateItemTree(ch, treeItem, select=select)\n \n \n def exportItemChanged(self, item, prev):\n if item is None:\n return\n if item.gitem is self.scene:\n newBounds = self.scene.views()[0].viewRect()\n else:\n newBounds = item.gitem.sceneBoundingRect()\n self.selectBox.setRect(newBounds)\n self.selectBox.show()\n self.updateFormatList()\n \n def updateFormatList(self):\n current = self.ui.formatList.currentItem()\n if current is not None:\n current = str(current.text())\n self.ui.formatList.clear()\n self.exporterClasses = {}\n gotCurrent = False\n for exp in exporters.listExporters():\n self.ui.formatList.addItem(exp.Name)\n self.exporterClasses[exp.Name] = exp\n if exp.Name == current:\n self.ui.formatList.setCurrentRow(self.ui.formatList.count()-1)\n gotCurrent = True\n \n if not gotCurrent:\n self.ui.formatList.setCurrentRow(0)\n \n def exportFormatChanged(self, item, prev):\n if item is None:\n self.currentExporter = None\n self.ui.paramTree.clear()\n return\n expClass = self.exporterClasses[str(item.text())]\n exp = expClass(item=self.ui.itemTree.currentItem().gitem)\n\n if prev:\n oldtext = str(prev.text())\n self.exporterParameters[oldtext] = self.currentExporter.parameters()\n newtext = str(item.text())\n if newtext in self.exporterParameters.keys():\n params = self.exporterParameters[newtext]\n exp.params = params\n else:\n params = exp.parameters()\n self.exporterParameters[newtext] = params\n\n if params is None:\n self.ui.paramTree.clear()\n else:\n self.ui.paramTree.setParameters(params)\n self.currentExporter = exp\n self.ui.copyBtn.setEnabled(exp.allowCopy)\n \n def exportClicked(self):\n self.selectBox.hide()\n self.currentExporter.export()\n \n def copyClicked(self):\n self.selectBox.hide()\n self.currentExporter.export(copy=True)\n \n def close(self):\n self.selectBox.setVisible(False)\n self.setVisible(False)\n\n def closeEvent(self, event):\n self.close()\n QtGui.QWidget.closeEvent(self, event)\n", "path": "pyqtgraph/GraphicsScene/exportDialog.py"}]}
| 2,949 | 280 |
gh_patches_debug_32324
|
rasdani/github-patches
|
git_diff
|
modal-labs__modal-examples-695
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
remove duplicate webscraper example once #669 is merged
</issue>
<code>
[start of 10_integrations/webscraper.py]
1 # ---
2 # runtimes: ["runc", "gvisor"]
3 # ---
4 import os
5
6 import modal
7
8 stub = modal.Stub("example-linkscraper")
9
10
11 playwright_image = modal.Image.debian_slim(
12 python_version="3.10"
13 ).run_commands( # Doesn't work with 3.11 yet
14 "apt-get update",
15 "apt-get install -y software-properties-common",
16 "apt-add-repository non-free",
17 "apt-add-repository contrib",
18 "pip install playwright==1.30.0",
19 "playwright install-deps chromium",
20 "playwright install chromium",
21 )
22
23
24 @stub.function(image=playwright_image)
25 async def get_links(url: str) -> set[str]:
26 from playwright.async_api import async_playwright
27
28 async with async_playwright() as p:
29 browser = await p.chromium.launch()
30 page = await browser.new_page()
31 await page.goto(url)
32 links = await page.eval_on_selector_all(
33 "a[href]", "elements => elements.map(element => element.href)"
34 )
35 await browser.close()
36
37 return set(links)
38
39
40 slack_sdk_image = modal.Image.debian_slim().pip_install("slack-sdk")
41
42
43 @stub.function(
44 image=slack_sdk_image,
45 secrets=[modal.Secret.from_name("scraper-slack-secret")],
46 )
47 def bot_token_msg(channel, message):
48 import slack_sdk
49
50 print(f"Posting {message} to #{channel}")
51 client = slack_sdk.WebClient(token=os.environ["SLACK_BOT_TOKEN"])
52 client.chat_postMessage(channel=channel, text=message)
53
54
55 @stub.function()
56 def scrape():
57 links_of_interest = ["http://modal.com"]
58
59 for links in get_links.map(links_of_interest):
60 for link in links:
61 bot_token_msg.remote("scraped-links", link)
62
63
64 @stub.function(schedule=modal.Period(days=1))
65 def daily_scrape():
66 scrape.remote()
67
68
69 @stub.local_entrypoint()
70 def run():
71 scrape.remote()
72
[end of 10_integrations/webscraper.py]
[start of misc/webscraper.py]
1 # ---
2 # runtimes: ["runc", "gvisor"]
3 # ---
4 import os
5
6 import modal
7
8 stub = modal.Stub("example-linkscraper")
9
10
11 playwright_image = modal.Image.debian_slim(
12 python_version="3.10"
13 ).run_commands( # Doesn't work with 3.11 yet
14 "apt-get update",
15 "apt-get install -y software-properties-common",
16 "apt-add-repository non-free",
17 "apt-add-repository contrib",
18 "pip install playwright==1.30.0",
19 "playwright install-deps chromium",
20 "playwright install chromium",
21 )
22
23
24 @stub.function(image=playwright_image)
25 async def get_links(url: str) -> set[str]:
26 from playwright.async_api import async_playwright
27
28 async with async_playwright() as p:
29 browser = await p.chromium.launch()
30 page = await browser.new_page()
31 await page.goto(url)
32 links = await page.eval_on_selector_all(
33 "a[href]", "elements => elements.map(element => element.href)"
34 )
35 await browser.close()
36
37 return set(links)
38
39
40 slack_sdk_image = modal.Image.debian_slim().pip_install("slack-sdk")
41
42
43 @stub.function(
44 image=slack_sdk_image,
45 secrets=[modal.Secret.from_name("scraper-slack-secret")],
46 )
47 def bot_token_msg(channel, message):
48 import slack_sdk
49
50 print(f"Posting {message} to #{channel}")
51 client = slack_sdk.WebClient(token=os.environ["SLACK_BOT_TOKEN"])
52 client.chat_postMessage(channel=channel, text=message)
53
54
55 @stub.function()
56 def scrape():
57 links_of_interest = ["http://modal.com"]
58
59 for links in get_links.map(links_of_interest):
60 for link in links:
61 bot_token_msg.remote("scraped-links", link)
62
63
64 @stub.function(schedule=modal.Period(days=1))
65 def daily_scrape():
66 scrape.remote()
67
68
69 @stub.local_entrypoint()
70 def run():
71 scrape.remote()
72
[end of misc/webscraper.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/10_integrations/webscraper.py b/10_integrations/webscraper.py
--- a/10_integrations/webscraper.py
+++ b/10_integrations/webscraper.py
@@ -15,7 +15,7 @@
"apt-get install -y software-properties-common",
"apt-add-repository non-free",
"apt-add-repository contrib",
- "pip install playwright==1.30.0",
+ "pip install playwright==1.42.0",
"playwright install-deps chromium",
"playwright install chromium",
)
diff --git a/misc/webscraper.py b/misc/webscraper.py
deleted file mode 100644
--- a/misc/webscraper.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# ---
-# runtimes: ["runc", "gvisor"]
-# ---
-import os
-
-import modal
-
-stub = modal.Stub("example-linkscraper")
-
-
-playwright_image = modal.Image.debian_slim(
- python_version="3.10"
-).run_commands( # Doesn't work with 3.11 yet
- "apt-get update",
- "apt-get install -y software-properties-common",
- "apt-add-repository non-free",
- "apt-add-repository contrib",
- "pip install playwright==1.30.0",
- "playwright install-deps chromium",
- "playwright install chromium",
-)
-
-
[email protected](image=playwright_image)
-async def get_links(url: str) -> set[str]:
- from playwright.async_api import async_playwright
-
- async with async_playwright() as p:
- browser = await p.chromium.launch()
- page = await browser.new_page()
- await page.goto(url)
- links = await page.eval_on_selector_all(
- "a[href]", "elements => elements.map(element => element.href)"
- )
- await browser.close()
-
- return set(links)
-
-
-slack_sdk_image = modal.Image.debian_slim().pip_install("slack-sdk")
-
-
[email protected](
- image=slack_sdk_image,
- secrets=[modal.Secret.from_name("scraper-slack-secret")],
-)
-def bot_token_msg(channel, message):
- import slack_sdk
-
- print(f"Posting {message} to #{channel}")
- client = slack_sdk.WebClient(token=os.environ["SLACK_BOT_TOKEN"])
- client.chat_postMessage(channel=channel, text=message)
-
-
[email protected]()
-def scrape():
- links_of_interest = ["http://modal.com"]
-
- for links in get_links.map(links_of_interest):
- for link in links:
- bot_token_msg.remote("scraped-links", link)
-
-
[email protected](schedule=modal.Period(days=1))
-def daily_scrape():
- scrape.remote()
-
-
[email protected]_entrypoint()
-def run():
- scrape.remote()
|
{"golden_diff": "diff --git a/10_integrations/webscraper.py b/10_integrations/webscraper.py\n--- a/10_integrations/webscraper.py\n+++ b/10_integrations/webscraper.py\n@@ -15,7 +15,7 @@\n \"apt-get install -y software-properties-common\",\n \"apt-add-repository non-free\",\n \"apt-add-repository contrib\",\n- \"pip install playwright==1.30.0\",\n+ \"pip install playwright==1.42.0\",\n \"playwright install-deps chromium\",\n \"playwright install chromium\",\n )\ndiff --git a/misc/webscraper.py b/misc/webscraper.py\ndeleted file mode 100644\n--- a/misc/webscraper.py\n+++ /dev/null\n@@ -1,71 +0,0 @@\n-# ---\n-# runtimes: [\"runc\", \"gvisor\"]\n-# ---\n-import os\n-\n-import modal\n-\n-stub = modal.Stub(\"example-linkscraper\")\n-\n-\n-playwright_image = modal.Image.debian_slim(\n- python_version=\"3.10\"\n-).run_commands( # Doesn't work with 3.11 yet\n- \"apt-get update\",\n- \"apt-get install -y software-properties-common\",\n- \"apt-add-repository non-free\",\n- \"apt-add-repository contrib\",\n- \"pip install playwright==1.30.0\",\n- \"playwright install-deps chromium\",\n- \"playwright install chromium\",\n-)\n-\n-\[email protected](image=playwright_image)\n-async def get_links(url: str) -> set[str]:\n- from playwright.async_api import async_playwright\n-\n- async with async_playwright() as p:\n- browser = await p.chromium.launch()\n- page = await browser.new_page()\n- await page.goto(url)\n- links = await page.eval_on_selector_all(\n- \"a[href]\", \"elements => elements.map(element => element.href)\"\n- )\n- await browser.close()\n-\n- return set(links)\n-\n-\n-slack_sdk_image = modal.Image.debian_slim().pip_install(\"slack-sdk\")\n-\n-\[email protected](\n- image=slack_sdk_image,\n- secrets=[modal.Secret.from_name(\"scraper-slack-secret\")],\n-)\n-def bot_token_msg(channel, message):\n- import slack_sdk\n-\n- print(f\"Posting {message} to #{channel}\")\n- client = slack_sdk.WebClient(token=os.environ[\"SLACK_BOT_TOKEN\"])\n- client.chat_postMessage(channel=channel, text=message)\n-\n-\[email protected]()\n-def scrape():\n- links_of_interest = [\"http://modal.com\"]\n-\n- for links in get_links.map(links_of_interest):\n- for link in links:\n- bot_token_msg.remote(\"scraped-links\", link)\n-\n-\[email protected](schedule=modal.Period(days=1))\n-def daily_scrape():\n- scrape.remote()\n-\n-\[email protected]_entrypoint()\n-def run():\n- scrape.remote()\n", "issue": "remove duplicate webscraper example once #669 is merged\n\n", "before_files": [{"content": "# ---\n# runtimes: [\"runc\", \"gvisor\"]\n# ---\nimport os\n\nimport modal\n\nstub = modal.Stub(\"example-linkscraper\")\n\n\nplaywright_image = modal.Image.debian_slim(\n python_version=\"3.10\"\n).run_commands( # Doesn't work with 3.11 yet\n \"apt-get update\",\n \"apt-get install -y software-properties-common\",\n \"apt-add-repository non-free\",\n \"apt-add-repository contrib\",\n \"pip install playwright==1.30.0\",\n \"playwright install-deps chromium\",\n \"playwright install chromium\",\n)\n\n\[email protected](image=playwright_image)\nasync def get_links(url: str) -> set[str]:\n from playwright.async_api import async_playwright\n\n async with async_playwright() as p:\n browser = await p.chromium.launch()\n page = await browser.new_page()\n await page.goto(url)\n links = await page.eval_on_selector_all(\n \"a[href]\", \"elements => elements.map(element => element.href)\"\n )\n await browser.close()\n\n return set(links)\n\n\nslack_sdk_image = modal.Image.debian_slim().pip_install(\"slack-sdk\")\n\n\[email protected](\n 
image=slack_sdk_image,\n secrets=[modal.Secret.from_name(\"scraper-slack-secret\")],\n)\ndef bot_token_msg(channel, message):\n import slack_sdk\n\n print(f\"Posting {message} to #{channel}\")\n client = slack_sdk.WebClient(token=os.environ[\"SLACK_BOT_TOKEN\"])\n client.chat_postMessage(channel=channel, text=message)\n\n\[email protected]()\ndef scrape():\n links_of_interest = [\"http://modal.com\"]\n\n for links in get_links.map(links_of_interest):\n for link in links:\n bot_token_msg.remote(\"scraped-links\", link)\n\n\[email protected](schedule=modal.Period(days=1))\ndef daily_scrape():\n scrape.remote()\n\n\[email protected]_entrypoint()\ndef run():\n scrape.remote()\n", "path": "10_integrations/webscraper.py"}, {"content": "# ---\n# runtimes: [\"runc\", \"gvisor\"]\n# ---\nimport os\n\nimport modal\n\nstub = modal.Stub(\"example-linkscraper\")\n\n\nplaywright_image = modal.Image.debian_slim(\n python_version=\"3.10\"\n).run_commands( # Doesn't work with 3.11 yet\n \"apt-get update\",\n \"apt-get install -y software-properties-common\",\n \"apt-add-repository non-free\",\n \"apt-add-repository contrib\",\n \"pip install playwright==1.30.0\",\n \"playwright install-deps chromium\",\n \"playwright install chromium\",\n)\n\n\[email protected](image=playwright_image)\nasync def get_links(url: str) -> set[str]:\n from playwright.async_api import async_playwright\n\n async with async_playwright() as p:\n browser = await p.chromium.launch()\n page = await browser.new_page()\n await page.goto(url)\n links = await page.eval_on_selector_all(\n \"a[href]\", \"elements => elements.map(element => element.href)\"\n )\n await browser.close()\n\n return set(links)\n\n\nslack_sdk_image = modal.Image.debian_slim().pip_install(\"slack-sdk\")\n\n\[email protected](\n image=slack_sdk_image,\n secrets=[modal.Secret.from_name(\"scraper-slack-secret\")],\n)\ndef bot_token_msg(channel, message):\n import slack_sdk\n\n print(f\"Posting {message} to #{channel}\")\n client = slack_sdk.WebClient(token=os.environ[\"SLACK_BOT_TOKEN\"])\n client.chat_postMessage(channel=channel, text=message)\n\n\[email protected]()\ndef scrape():\n links_of_interest = [\"http://modal.com\"]\n\n for links in get_links.map(links_of_interest):\n for link in links:\n bot_token_msg.remote(\"scraped-links\", link)\n\n\[email protected](schedule=modal.Period(days=1))\ndef daily_scrape():\n scrape.remote()\n\n\[email protected]_entrypoint()\ndef run():\n scrape.remote()\n", "path": "misc/webscraper.py"}]}
| 1,709 | 665 |
gh_patches_debug_14271
|
rasdani/github-patches
|
git_diff
|
getsentry__snuba-3697
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
About the sessions-subscription-results subscription issue
### Environment
- sentry | snuba version: 23.1.1
https://github.com/getsentry/snuba/pull/2737, @lynnagara Hello, I have a question about this PR and hope to get your answer, thank you very much
- After removing subscriptions-scheduler-executor-session support in snuba, how should data be written to the sessions-subscription-results topic? I ask because the crash-rate alert code in sentry is still there and has not changed, for example
- https://github.com/getsentry/sentry/pull/28526
https://github.com/getsentry/sentry/blob/8e00dcdf463d916b9ca79ddbe13e99f161d58db1/src/sentry/snuba/query_subscription_consumer.py#L61-L61
My original question is as follows: I have enabled the organizations:incidents feature in sentry and subscribed to the sessions-subscription-results topic through the following script
```bash
sentry run query-subscription-consumer --topic=sessions-subscription-results
```
Because there is no data in the sessions-subscription-results topic, the crash rate alarm cannot work
<img width="1568" alt="image" src="https://user-images.githubusercontent.com/18591662/216570393-64748a25-1cd4-4980-966c-f7665dc8482b.png">
</issue>
<code>
[start of snuba/cli/subscriptions_scheduler_executor.py]
1 import signal
2 from contextlib import contextmanager
3 from typing import Any, Iterator, Optional, Sequence
4
5 import click
6 from arroyo import configure_metrics
7 from arroyo.backends.kafka import KafkaProducer
8
9 from snuba import environment, state
10 from snuba.attribution.log import flush_attribution_producer
11 from snuba.datasets.entities.entity_key import EntityKey
12 from snuba.datasets.entities.factory import get_entity
13 from snuba.environment import setup_logging, setup_sentry
14 from snuba.subscriptions.combined_scheduler_executor import (
15 build_scheduler_executor_consumer,
16 )
17 from snuba.utils.metrics.wrapper import MetricsWrapper
18 from snuba.utils.streams.configuration_builder import build_kafka_producer_configuration
19 from snuba.utils.streams.metrics_adapter import StreamMetricsAdapter
20
21
22 @click.command()
23 @click.option(
24 "--dataset",
25 "dataset_name",
26 required=True,
27 type=click.Choice(["events", "transactions", "metrics"]),
28 help="The dataset to target.",
29 )
30 @click.option(
31 "--entity",
32 "entity_names",
33 required=True,
34 multiple=True,
35 type=click.Choice(["events", "transactions", "metrics_counters", "metrics_sets"]),
36 help="The entity to target.",
37 )
38 @click.option(
39 "--consumer-group",
40 default="snuba-subscription-executor",
41 help="Consumer group used for consuming the scheduled subscription topic/s.",
42 )
43 @click.option(
44 "--followed-consumer-group",
45 required=True,
46 help="Name of the consumer group to follow",
47 )
48 @click.option(
49 "--total-concurrent-queries",
50 default=64,
51 type=int,
52 help="Total max number of concurrent queries for all replicas. Used to calculate max_concurrent_queries.",
53 )
54 @click.option(
55 "--auto-offset-reset",
56 default="error",
57 type=click.Choice(["error", "earliest", "latest"]),
58 help="Kafka consumer auto offset reset.",
59 )
60 @click.option(
61 "--no-strict-offset-reset",
62 is_flag=True,
63 help="Forces the kafka consumer auto offset reset.",
64 )
65 @click.option("--schedule-ttl", type=int, default=60 * 5)
66 @click.option("--delay-seconds", type=int)
67 @click.option(
68 "--stale-threshold-seconds",
69 type=int,
70 help="Skip scheduling if timestamp is beyond this threshold compared to the system time",
71 )
72 @click.option("--log-level", help="Logging level to use.")
73 def subscriptions_scheduler_executor(
74 *,
75 dataset_name: str,
76 entity_names: Sequence[str],
77 consumer_group: str,
78 followed_consumer_group: str,
79 total_concurrent_queries: int,
80 auto_offset_reset: str,
81 no_strict_offset_reset: bool,
82 schedule_ttl: int,
83 delay_seconds: Optional[int],
84 stale_threshold_seconds: Optional[int],
85 log_level: Optional[str],
86 ) -> None:
87 """
88 Combined subscriptions scheduler and executor. Alternative to the separate scheduler and executor processes.
89 """
90 setup_logging(log_level)
91 setup_sentry()
92
93 metrics = MetricsWrapper(
94 environment.metrics,
95 "subscriptions.scheduler_executor",
96 tags={"dataset": dataset_name},
97 )
98
99 configure_metrics(StreamMetricsAdapter(metrics))
100
101 # Just get the result topic configuration from the first entity. Later we
102 # check they all have the same result topic anyway before building the consumer.
103 entity_key = EntityKey(entity_names[0])
104
105 storage = get_entity(entity_key).get_writable_storage()
106 assert storage is not None
107 stream_loader = storage.get_table_writer().get_stream_loader()
108 result_topic_spec = stream_loader.get_subscription_scheduled_topic_spec()
109 assert result_topic_spec is not None
110
111 producer = KafkaProducer(
112 build_kafka_producer_configuration(
113 result_topic_spec.topic,
114 override_params={"partitioner": "consistent"},
115 )
116 )
117
118 processor = build_scheduler_executor_consumer(
119 dataset_name,
120 entity_names,
121 consumer_group,
122 followed_consumer_group,
123 producer,
124 auto_offset_reset,
125 not no_strict_offset_reset,
126 schedule_ttl,
127 delay_seconds,
128 stale_threshold_seconds,
129 total_concurrent_queries,
130 metrics,
131 )
132
133 def handler(signum: int, frame: Any) -> None:
134 processor.signal_shutdown()
135
136 signal.signal(signal.SIGINT, handler)
137 signal.signal(signal.SIGTERM, handler)
138
139 with closing(producer), flush_querylog(), flush_attribution_producer():
140 processor.run()
141
142
143 @contextmanager
144 def closing(producer: KafkaProducer) -> Iterator[Optional[KafkaProducer]]:
145 try:
146 yield producer
147 finally:
148 producer.close().result()
149
150
151 @contextmanager
152 def flush_querylog() -> Iterator[None]:
153 try:
154 yield
155 finally:
156 state.flush_producer()
157
[end of snuba/cli/subscriptions_scheduler_executor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/snuba/cli/subscriptions_scheduler_executor.py b/snuba/cli/subscriptions_scheduler_executor.py
--- a/snuba/cli/subscriptions_scheduler_executor.py
+++ b/snuba/cli/subscriptions_scheduler_executor.py
@@ -24,7 +24,7 @@
"--dataset",
"dataset_name",
required=True,
- type=click.Choice(["events", "transactions", "metrics"]),
+ type=click.Choice(["events", "transactions", "metrics", "sessions"]),
help="The dataset to target.",
)
@click.option(
@@ -32,7 +32,9 @@
"entity_names",
required=True,
multiple=True,
- type=click.Choice(["events", "transactions", "metrics_counters", "metrics_sets"]),
+ type=click.Choice(
+ ["events", "transactions", "metrics_counters", "metrics_sets", "sessions"]
+ ),
help="The entity to target.",
)
@click.option(
|
{"golden_diff": "diff --git a/snuba/cli/subscriptions_scheduler_executor.py b/snuba/cli/subscriptions_scheduler_executor.py\n--- a/snuba/cli/subscriptions_scheduler_executor.py\n+++ b/snuba/cli/subscriptions_scheduler_executor.py\n@@ -24,7 +24,7 @@\n \"--dataset\",\n \"dataset_name\",\n required=True,\n- type=click.Choice([\"events\", \"transactions\", \"metrics\"]),\n+ type=click.Choice([\"events\", \"transactions\", \"metrics\", \"sessions\"]),\n help=\"The dataset to target.\",\n )\n @click.option(\n@@ -32,7 +32,9 @@\n \"entity_names\",\n required=True,\n multiple=True,\n- type=click.Choice([\"events\", \"transactions\", \"metrics_counters\", \"metrics_sets\"]),\n+ type=click.Choice(\n+ [\"events\", \"transactions\", \"metrics_counters\", \"metrics_sets\", \"sessions\"]\n+ ),\n help=\"The entity to target.\",\n )\n @click.option(\n", "issue": "About the sessions-subscription-results subscription issue\n### Environment\r\n\r\n- sentry | snuba version \uff1a23.1.1\r\n\r\nhttps://github.com/getsentry/snuba/pull/2737 \uff0c@lynnagara Hello, I have a question about this pr, hope to get your answer, thank you very much\r\n\r\n- After removing subscriptions-scheduler-executor-session support in snuba, how to write data to the topic of sessions-subscription-results? Because I see that the crash rate warning code in sentry is still there and has not changed, for example\r\n\r\n- https://github.com/getsentry/sentry/pull/28526\r\n\r\nhttps://github.com/getsentry/sentry/blob/8e00dcdf463d916b9ca79ddbe13e99f161d58db1/src/sentry/snuba/query_subscription_consumer.py#L61-L61\r\n\r\nMy original question is as follows, I have enabled the organizations:incidents function in sentry and subscribed to sessions-results through the following script\r\n```bash\r\nsentry\r\nrun\r\nquery-subscription-consumer\r\n--topic=sessions-subscription-results\r\n```\r\nBecause there is no data in the sessions-subscription-results topic, the crash rate alarm cannot work\r\n<img width=\"1568\" alt=\"image\" src=\"https://user-images.githubusercontent.com/18591662/216570393-64748a25-1cd4-4980-966c-f7665dc8482b.png\">\r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import signal\nfrom contextlib import contextmanager\nfrom typing import Any, Iterator, Optional, Sequence\n\nimport click\nfrom arroyo import configure_metrics\nfrom arroyo.backends.kafka import KafkaProducer\n\nfrom snuba import environment, state\nfrom snuba.attribution.log import flush_attribution_producer\nfrom snuba.datasets.entities.entity_key import EntityKey\nfrom snuba.datasets.entities.factory import get_entity\nfrom snuba.environment import setup_logging, setup_sentry\nfrom snuba.subscriptions.combined_scheduler_executor import (\n build_scheduler_executor_consumer,\n)\nfrom snuba.utils.metrics.wrapper import MetricsWrapper\nfrom snuba.utils.streams.configuration_builder import build_kafka_producer_configuration\nfrom snuba.utils.streams.metrics_adapter import StreamMetricsAdapter\n\n\[email protected]()\[email protected](\n \"--dataset\",\n \"dataset_name\",\n required=True,\n type=click.Choice([\"events\", \"transactions\", \"metrics\"]),\n help=\"The dataset to target.\",\n)\[email protected](\n \"--entity\",\n \"entity_names\",\n required=True,\n multiple=True,\n type=click.Choice([\"events\", \"transactions\", \"metrics_counters\", \"metrics_sets\"]),\n help=\"The entity to target.\",\n)\[email protected](\n \"--consumer-group\",\n default=\"snuba-subscription-executor\",\n help=\"Consumer group used for consuming the scheduled subscription 
topic/s.\",\n)\[email protected](\n \"--followed-consumer-group\",\n required=True,\n help=\"Name of the consumer group to follow\",\n)\[email protected](\n \"--total-concurrent-queries\",\n default=64,\n type=int,\n help=\"Total max number of concurrent queries for all replicas. Used to calculate max_concurrent_queries.\",\n)\[email protected](\n \"--auto-offset-reset\",\n default=\"error\",\n type=click.Choice([\"error\", \"earliest\", \"latest\"]),\n help=\"Kafka consumer auto offset reset.\",\n)\[email protected](\n \"--no-strict-offset-reset\",\n is_flag=True,\n help=\"Forces the kafka consumer auto offset reset.\",\n)\[email protected](\"--schedule-ttl\", type=int, default=60 * 5)\[email protected](\"--delay-seconds\", type=int)\[email protected](\n \"--stale-threshold-seconds\",\n type=int,\n help=\"Skip scheduling if timestamp is beyond this threshold compared to the system time\",\n)\[email protected](\"--log-level\", help=\"Logging level to use.\")\ndef subscriptions_scheduler_executor(\n *,\n dataset_name: str,\n entity_names: Sequence[str],\n consumer_group: str,\n followed_consumer_group: str,\n total_concurrent_queries: int,\n auto_offset_reset: str,\n no_strict_offset_reset: bool,\n schedule_ttl: int,\n delay_seconds: Optional[int],\n stale_threshold_seconds: Optional[int],\n log_level: Optional[str],\n) -> None:\n \"\"\"\n Combined subscriptions scheduler and executor. Alternative to the separate scheduler and executor processes.\n \"\"\"\n setup_logging(log_level)\n setup_sentry()\n\n metrics = MetricsWrapper(\n environment.metrics,\n \"subscriptions.scheduler_executor\",\n tags={\"dataset\": dataset_name},\n )\n\n configure_metrics(StreamMetricsAdapter(metrics))\n\n # Just get the result topic configuration from the first entity. Later we\n # check they all have the same result topic anyway before building the consumer.\n entity_key = EntityKey(entity_names[0])\n\n storage = get_entity(entity_key).get_writable_storage()\n assert storage is not None\n stream_loader = storage.get_table_writer().get_stream_loader()\n result_topic_spec = stream_loader.get_subscription_scheduled_topic_spec()\n assert result_topic_spec is not None\n\n producer = KafkaProducer(\n build_kafka_producer_configuration(\n result_topic_spec.topic,\n override_params={\"partitioner\": \"consistent\"},\n )\n )\n\n processor = build_scheduler_executor_consumer(\n dataset_name,\n entity_names,\n consumer_group,\n followed_consumer_group,\n producer,\n auto_offset_reset,\n not no_strict_offset_reset,\n schedule_ttl,\n delay_seconds,\n stale_threshold_seconds,\n total_concurrent_queries,\n metrics,\n )\n\n def handler(signum: int, frame: Any) -> None:\n processor.signal_shutdown()\n\n signal.signal(signal.SIGINT, handler)\n signal.signal(signal.SIGTERM, handler)\n\n with closing(producer), flush_querylog(), flush_attribution_producer():\n processor.run()\n\n\n@contextmanager\ndef closing(producer: KafkaProducer) -> Iterator[Optional[KafkaProducer]]:\n try:\n yield producer\n finally:\n producer.close().result()\n\n\n@contextmanager\ndef flush_querylog() -> Iterator[None]:\n try:\n yield\n finally:\n state.flush_producer()\n", "path": "snuba/cli/subscriptions_scheduler_executor.py"}]}
| 2,252 | 206 |
gh_patches_debug_20728
|
rasdani/github-patches
|
git_diff
|
hpcaitech__ColossalAI-5096
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
<code>
[start of colossalai/inference/engine/policies/llama.py]
1 from functools import partial
2 from typing import List
3
4 import torch
5 from torch.nn import Module
6 from transformers.models.llama.modeling_llama import (
7 LlamaAttention,
8 LlamaDecoderLayer,
9 LlamaForCausalLM,
10 LlamaModel,
11 LlamaRMSNorm,
12 )
13
14 from colossalai.shardformer.policies.base_policy import ModulePolicyDescription, SubModuleReplacementDescription
15
16 # import colossalai
17 from colossalai.shardformer.policies.llama import LlamaForCausalLMPolicy
18
19 from ..modeling._utils import init_to_get_rotary
20 from ..modeling.llama import LlamaInferenceForwards
21
22 try:
23 from colossalai.kernel.triton import rmsnorm_forward
24
25 HAS_TRITON_RMSNORM = True
26 except:
27 print("you should install triton from https://github.com/openai/triton")
28 HAS_TRITON_RMSNORM = False
29
30
31 def get_triton_rmsnorm_forward():
32 if HAS_TRITON_RMSNORM:
33
34 def _triton_rmsnorm_forward(self: LlamaRMSNorm, hidden_states: torch.Tensor):
35 return rmsnorm_forward(hidden_states, self.weight.data, self.variance_epsilon)
36
37 return _triton_rmsnorm_forward
38 else:
39 return None
40
41
42 class LlamaModelInferPolicy(LlamaForCausalLMPolicy):
43 def __init__(self) -> None:
44 super().__init__()
45
46 def module_policy(self):
47 policy = super().module_policy()
48 decoder_attribute_replacement = {
49 "self_attn.hidden_size": self.model.config.hidden_size // self.shard_config.tensor_parallel_size,
50 "self_attn.num_heads": self.model.config.num_attention_heads // self.shard_config.tensor_parallel_size,
51 "self_attn.num_key_value_heads": self.model.config.num_key_value_heads
52 // self.shard_config.tensor_parallel_size,
53 }
54 if self.shard_config.extra_kwargs.get("quant", None) == "gptq":
55 from colossalai.inference.quant.gptq.cai_gptq import ColCaiQuantLinear, RowCaiQuantLinear
56
57 policy[LlamaDecoderLayer] = ModulePolicyDescription(
58 attribute_replacement=decoder_attribute_replacement,
59 sub_module_replacement=[
60 SubModuleReplacementDescription(
61 suffix="self_attn.q_proj",
62 target_module=ColCaiQuantLinear,
63 kwargs={"split_num": 1},
64 ),
65 SubModuleReplacementDescription(
66 suffix="self_attn.k_proj",
67 target_module=ColCaiQuantLinear,
68 kwargs={"split_num": 1},
69 ),
70 SubModuleReplacementDescription(
71 suffix="self_attn.v_proj",
72 target_module=ColCaiQuantLinear,
73 kwargs={"split_num": 1},
74 ),
75 SubModuleReplacementDescription(
76 suffix="self_attn.o_proj",
77 target_module=RowCaiQuantLinear,
78 kwargs={"split_num": 1},
79 ),
80 SubModuleReplacementDescription(
81 suffix="mlp.gate_proj",
82 target_module=ColCaiQuantLinear,
83 kwargs={"split_num": 1},
84 ),
85 SubModuleReplacementDescription(
86 suffix="mlp.up_proj",
87 target_module=ColCaiQuantLinear,
88 kwargs={"split_num": 1},
89 ),
90 SubModuleReplacementDescription(
91 suffix="mlp.down_proj",
92 target_module=RowCaiQuantLinear,
93 kwargs={"split_num": 1},
94 ),
95 ],
96 )
97
98 elif self.shard_config.extra_kwargs.get("quant", None) == "smoothquant":
99 from colossalai.inference.quant.smoothquant.models.llama import LlamaSmoothquantDecoderLayer
100 from colossalai.inference.quant.smoothquant.models.parallel_linear import (
101 ColW8A8BFP32OFP32Linear,
102 RowW8A8B8O8Linear,
103 RowW8A8BFP32O32LinearSiLU,
104 RowW8A8BFP32OFP32Linear,
105 )
106
107 policy[LlamaSmoothquantDecoderLayer] = ModulePolicyDescription(
108 attribute_replacement=decoder_attribute_replacement,
109 sub_module_replacement=[
110 SubModuleReplacementDescription(
111 suffix="self_attn.q_proj",
112 target_module=RowW8A8B8O8Linear,
113 kwargs={"split_num": 1},
114 ),
115 SubModuleReplacementDescription(
116 suffix="self_attn.k_proj",
117 target_module=RowW8A8B8O8Linear,
118 kwargs={"split_num": 1},
119 ),
120 SubModuleReplacementDescription(
121 suffix="self_attn.v_proj",
122 target_module=RowW8A8B8O8Linear,
123 kwargs={"split_num": 1},
124 ),
125 SubModuleReplacementDescription(
126 suffix="self_attn.o_proj",
127 target_module=ColW8A8BFP32OFP32Linear,
128 kwargs={"split_num": 1},
129 ),
130 SubModuleReplacementDescription(
131 suffix="mlp.gate_proj",
132 target_module=RowW8A8BFP32O32LinearSiLU,
133 kwargs={"split_num": 1},
134 ),
135 SubModuleReplacementDescription(
136 suffix="mlp.up_proj",
137 target_module=RowW8A8BFP32OFP32Linear,
138 kwargs={"split_num": 1},
139 ),
140 SubModuleReplacementDescription(
141 suffix="mlp.down_proj",
142 target_module=ColW8A8BFP32OFP32Linear,
143 kwargs={"split_num": 1},
144 ),
145 ],
146 )
147 self.shard_config._infer()
148
149 infer_forward = LlamaInferenceForwards.llama_model_forward
150 method_replacement = {"forward": partial(infer_forward)}
151 self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=LlamaModel)
152
153 infer_forward = LlamaInferenceForwards.llama_decoder_layer_forward
154 method_replacement = {"forward": partial(infer_forward)}
155 self.append_or_create_method_replacement(
156 description=method_replacement, policy=policy, target_key=LlamaDecoderLayer
157 )
158
159 infer_forward = LlamaInferenceForwards.llama_flash_attn_kvcache_forward
160 method_replacement = {"forward": partial(infer_forward)}
161 self.append_or_create_method_replacement(
162 description=method_replacement, policy=policy, target_key=LlamaAttention
163 )
164
165 # set as default, in inference we also use pipeline style forward, just setting stage as 1
166 self.set_pipeline_forward(
167 model_cls=LlamaForCausalLM, new_forward=LlamaInferenceForwards.llama_causal_lm_forward, policy=policy
168 )
169
170 infer_forward = None
171 if HAS_TRITON_RMSNORM:
172 infer_forward = get_triton_rmsnorm_forward()
173
174 if infer_forward is not None:
175 method_replacement = {"forward": partial(infer_forward)}
176 self.append_or_create_method_replacement(
177 description=method_replacement, policy=policy, target_key=LlamaRMSNorm
178 )
179
180 return policy
181
182 def postprocess(self):
183 init_to_get_rotary(self.model.model)
184 return self.model
185
186 def get_held_layers(self) -> List[Module]:
187 """Get pipeline layers for current stage."""
188 assert self.pipeline_stage_manager is not None
189
190 if self.model.__class__.__name__ == "LlamaModel":
191 module = self.model
192 else:
193 module = self.model.model
194 stage_manager = self.pipeline_stage_manager
195
196 held_layers = []
197 layers_per_stage = self.distribute_layers(len(module.layers), stage_manager.num_stages)
198 if stage_manager.is_first_stage():
199 held_layers.append(module.embed_tokens)
200 held_layers.append(self.model.lm_head)
201 start_idx, end_idx = self.get_stage_index(layers_per_stage, stage_manager.stage)
202 held_layers.extend(module.layers[start_idx:end_idx])
203 if stage_manager.is_last_stage():
204 held_layers.append(module.norm)
205
206 return held_layers
207
[end of colossalai/inference/engine/policies/llama.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/colossalai/inference/engine/policies/llama.py b/colossalai/inference/engine/policies/llama.py
--- a/colossalai/inference/engine/policies/llama.py
+++ b/colossalai/inference/engine/policies/llama.py
@@ -20,23 +20,17 @@
from ..modeling.llama import LlamaInferenceForwards
try:
- from colossalai.kernel.triton import rmsnorm_forward
-
+ from lightllm.models.llama.triton_kernel.rmsnorm import rmsnorm_forward as lightllm_rmsnorm_forward
HAS_TRITON_RMSNORM = True
except:
print("you should install triton from https://github.com/openai/triton")
HAS_TRITON_RMSNORM = False
-
-def get_triton_rmsnorm_forward():
- if HAS_TRITON_RMSNORM:
-
+if HAS_TRITON_RMSNORM:
+ def get_triton_rmsnorm_forward():
def _triton_rmsnorm_forward(self: LlamaRMSNorm, hidden_states: torch.Tensor):
- return rmsnorm_forward(hidden_states, self.weight.data, self.variance_epsilon)
-
+ return lightllm_rmsnorm_forward(hidden_states, self.weight.data, self.variance_epsilon)
return _triton_rmsnorm_forward
- else:
- return None
class LlamaModelInferPolicy(LlamaForCausalLMPolicy):
|
{"golden_diff": "diff --git a/colossalai/inference/engine/policies/llama.py b/colossalai/inference/engine/policies/llama.py\n--- a/colossalai/inference/engine/policies/llama.py\n+++ b/colossalai/inference/engine/policies/llama.py\n@@ -20,23 +20,17 @@\n from ..modeling.llama import LlamaInferenceForwards\n \n try:\n- from colossalai.kernel.triton import rmsnorm_forward\n-\n+ from lightllm.models.llama.triton_kernel.rmsnorm import rmsnorm_forward as lightllm_rmsnorm_forward\n HAS_TRITON_RMSNORM = True\n except:\n print(\"you should install triton from https://github.com/openai/triton\")\n HAS_TRITON_RMSNORM = False\n \n-\n-def get_triton_rmsnorm_forward():\n- if HAS_TRITON_RMSNORM:\n-\n+if HAS_TRITON_RMSNORM:\n+ def get_triton_rmsnorm_forward():\n def _triton_rmsnorm_forward(self: LlamaRMSNorm, hidden_states: torch.Tensor):\n- return rmsnorm_forward(hidden_states, self.weight.data, self.variance_epsilon)\n-\n+ return lightllm_rmsnorm_forward(hidden_states, self.weight.data, self.variance_epsilon)\n return _triton_rmsnorm_forward\n- else:\n- return None\n \n \n class LlamaModelInferPolicy(LlamaForCausalLMPolicy):\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from functools import partial\nfrom typing import List\n\nimport torch\nfrom torch.nn import Module\nfrom transformers.models.llama.modeling_llama import (\n LlamaAttention,\n LlamaDecoderLayer,\n LlamaForCausalLM,\n LlamaModel,\n LlamaRMSNorm,\n)\n\nfrom colossalai.shardformer.policies.base_policy import ModulePolicyDescription, SubModuleReplacementDescription\n\n# import colossalai\nfrom colossalai.shardformer.policies.llama import LlamaForCausalLMPolicy\n\nfrom ..modeling._utils import init_to_get_rotary\nfrom ..modeling.llama import LlamaInferenceForwards\n\ntry:\n from colossalai.kernel.triton import rmsnorm_forward\n\n HAS_TRITON_RMSNORM = True\nexcept:\n print(\"you should install triton from https://github.com/openai/triton\")\n HAS_TRITON_RMSNORM = False\n\n\ndef get_triton_rmsnorm_forward():\n if HAS_TRITON_RMSNORM:\n\n def _triton_rmsnorm_forward(self: LlamaRMSNorm, hidden_states: torch.Tensor):\n return rmsnorm_forward(hidden_states, self.weight.data, self.variance_epsilon)\n\n return _triton_rmsnorm_forward\n else:\n return None\n\n\nclass LlamaModelInferPolicy(LlamaForCausalLMPolicy):\n def __init__(self) -> None:\n super().__init__()\n\n def module_policy(self):\n policy = super().module_policy()\n decoder_attribute_replacement = {\n \"self_attn.hidden_size\": self.model.config.hidden_size // self.shard_config.tensor_parallel_size,\n \"self_attn.num_heads\": self.model.config.num_attention_heads // self.shard_config.tensor_parallel_size,\n \"self_attn.num_key_value_heads\": self.model.config.num_key_value_heads\n // self.shard_config.tensor_parallel_size,\n }\n if self.shard_config.extra_kwargs.get(\"quant\", None) == \"gptq\":\n from colossalai.inference.quant.gptq.cai_gptq import ColCaiQuantLinear, RowCaiQuantLinear\n\n policy[LlamaDecoderLayer] = ModulePolicyDescription(\n attribute_replacement=decoder_attribute_replacement,\n sub_module_replacement=[\n SubModuleReplacementDescription(\n suffix=\"self_attn.q_proj\",\n target_module=ColCaiQuantLinear,\n kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"self_attn.k_proj\",\n target_module=ColCaiQuantLinear,\n kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"self_attn.v_proj\",\n target_module=ColCaiQuantLinear,\n 
kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"self_attn.o_proj\",\n target_module=RowCaiQuantLinear,\n kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"mlp.gate_proj\",\n target_module=ColCaiQuantLinear,\n kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"mlp.up_proj\",\n target_module=ColCaiQuantLinear,\n kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"mlp.down_proj\",\n target_module=RowCaiQuantLinear,\n kwargs={\"split_num\": 1},\n ),\n ],\n )\n\n elif self.shard_config.extra_kwargs.get(\"quant\", None) == \"smoothquant\":\n from colossalai.inference.quant.smoothquant.models.llama import LlamaSmoothquantDecoderLayer\n from colossalai.inference.quant.smoothquant.models.parallel_linear import (\n ColW8A8BFP32OFP32Linear,\n RowW8A8B8O8Linear,\n RowW8A8BFP32O32LinearSiLU,\n RowW8A8BFP32OFP32Linear,\n )\n\n policy[LlamaSmoothquantDecoderLayer] = ModulePolicyDescription(\n attribute_replacement=decoder_attribute_replacement,\n sub_module_replacement=[\n SubModuleReplacementDescription(\n suffix=\"self_attn.q_proj\",\n target_module=RowW8A8B8O8Linear,\n kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"self_attn.k_proj\",\n target_module=RowW8A8B8O8Linear,\n kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"self_attn.v_proj\",\n target_module=RowW8A8B8O8Linear,\n kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"self_attn.o_proj\",\n target_module=ColW8A8BFP32OFP32Linear,\n kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"mlp.gate_proj\",\n target_module=RowW8A8BFP32O32LinearSiLU,\n kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"mlp.up_proj\",\n target_module=RowW8A8BFP32OFP32Linear,\n kwargs={\"split_num\": 1},\n ),\n SubModuleReplacementDescription(\n suffix=\"mlp.down_proj\",\n target_module=ColW8A8BFP32OFP32Linear,\n kwargs={\"split_num\": 1},\n ),\n ],\n )\n self.shard_config._infer()\n\n infer_forward = LlamaInferenceForwards.llama_model_forward\n method_replacement = {\"forward\": partial(infer_forward)}\n self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=LlamaModel)\n\n infer_forward = LlamaInferenceForwards.llama_decoder_layer_forward\n method_replacement = {\"forward\": partial(infer_forward)}\n self.append_or_create_method_replacement(\n description=method_replacement, policy=policy, target_key=LlamaDecoderLayer\n )\n\n infer_forward = LlamaInferenceForwards.llama_flash_attn_kvcache_forward\n method_replacement = {\"forward\": partial(infer_forward)}\n self.append_or_create_method_replacement(\n description=method_replacement, policy=policy, target_key=LlamaAttention\n )\n\n # set as default, in inference we also use pipeline style forward, just setting stage as 1\n self.set_pipeline_forward(\n model_cls=LlamaForCausalLM, new_forward=LlamaInferenceForwards.llama_causal_lm_forward, policy=policy\n )\n\n infer_forward = None\n if HAS_TRITON_RMSNORM:\n infer_forward = get_triton_rmsnorm_forward()\n\n if infer_forward is not None:\n method_replacement = {\"forward\": partial(infer_forward)}\n self.append_or_create_method_replacement(\n description=method_replacement, policy=policy, target_key=LlamaRMSNorm\n )\n\n return policy\n\n def postprocess(self):\n init_to_get_rotary(self.model.model)\n return self.model\n\n def get_held_layers(self) -> List[Module]:\n \"\"\"Get pipeline 
layers for current stage.\"\"\"\n assert self.pipeline_stage_manager is not None\n\n if self.model.__class__.__name__ == \"LlamaModel\":\n module = self.model\n else:\n module = self.model.model\n stage_manager = self.pipeline_stage_manager\n\n held_layers = []\n layers_per_stage = self.distribute_layers(len(module.layers), stage_manager.num_stages)\n if stage_manager.is_first_stage():\n held_layers.append(module.embed_tokens)\n held_layers.append(self.model.lm_head)\n start_idx, end_idx = self.get_stage_index(layers_per_stage, stage_manager.stage)\n held_layers.extend(module.layers[start_idx:end_idx])\n if stage_manager.is_last_stage():\n held_layers.append(module.norm)\n\n return held_layers\n", "path": "colossalai/inference/engine/policies/llama.py"}]}
| 2,803 | 321 |
gh_patches_debug_24137
|
rasdani/github-patches
|
git_diff
|
ansible-collections__community.vmware-1007
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change service startup policy with vmware_host_service_manager without defining service state
<!--- Verify first that your feature was not already discussed on GitHub -->
<!--- Complete *all* sections as described, this form is processed automatically -->
##### SUMMARY
<!--- Describe the new feature/improvement briefly below -->
When setting up the service in vSphere Client, the actions to set startup policy or service state are independent. However, when setting the service startup policy using the `vmware_host_service_manager` module, you have to specify the service state if you don't want to start the service, which is the default behavior.
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->
`vmware_host_service_manager` in community.vmware v1.11.0
##### ADDITIONAL INFORMATION
<!--- Describe how the feature would be used, why it is needed and what it would solve -->
Using the Ansible module should probably match the vSphere Client behavior: it should not be necessary to know the service state before changing the startup policy. The current workaround is to use the `vmware_host_service_info` module to gather the state first and then reuse it, which is a rather complicated approach because the output values of the `running` variable from `vmware_host_service_info` do not match the input values of the `state` variable in `vmware_host_service_manager`.
The `state` value of `unchanged` could be added (and set as default?). The current default is `start`.
Example playbook with the changes implemented:
<!--- Paste example playbooks or commands between quotes below -->
```yaml
- hosts: localhost
gather_facts: false
tasks:
- name: Disabling SSH service
community.vmware.vmware_host_service_manager:
esxi_hostname: <esxi_hostname>
service_name: 'TSM-SSH'
service_policy: 'off'
state: 'unchanged'
```
<!--- HINT: You can also paste gist.github.com links for larger files -->
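For illustration, here is a minimal Python sketch (not the module's actual code) of the decision logic a hypothetical `unchanged` value would rely on; the call names are assumptions that simply mirror the vSphere service operations:

```python
def plan_actions(desired_state, desired_policy, running, policy):
    """Return the host service calls that would be issued (sketch only)."""
    actions = []
    if desired_state in ('start', 'present') and not running:
        actions.append('StartService')
    elif desired_state in ('stop', 'absent') and running:
        actions.append('StopService')
    elif desired_state == 'restart':
        actions.append('RestartService')
    # A hypothetical 'unchanged' value matches none of the branches above,
    # so the running state is left alone.
    if desired_policy and desired_policy != policy:
        actions.append('UpdateServicePolicy')
    return actions

print(plan_actions('unchanged', 'off', running=True, policy='on'))
# -> ['UpdateServicePolicy']: only the startup policy changes
```

Under that assumption, only the set of accepted `state` choices would need to grow; the existing state-handling branches could stay as they are.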
</issue>
<code>
[start of plugins/modules/vmware_host_service_manager.py]
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
5 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
6
7 from __future__ import absolute_import, division, print_function
8 __metaclass__ = type
9
10
11 DOCUMENTATION = r'''
12 ---
13 module: vmware_host_service_manager
14 short_description: Manage services on a given ESXi host
15 description:
16 - This module can be used to manage (start, stop, restart) services on a given ESXi host.
17 - If cluster_name is provided, specified service will be managed on all ESXi host belonging to that cluster.
18 - If specific esxi_hostname is provided, then specified service will be managed on given ESXi host only.
19 author:
20 - Abhijeet Kasurde (@Akasurde)
21 notes:
22 - Tested on vSphere 6.5
23 requirements:
24 - python >= 2.6
25 - PyVmomi
26 options:
27 cluster_name:
28 description:
29 - Name of the cluster.
30 - Service settings are applied to every ESXi host system/s in given cluster.
31 - If C(esxi_hostname) is not given, this parameter is required.
32 type: str
33 esxi_hostname:
34 description:
35 - ESXi hostname.
36 - Service settings are applied to this ESXi host system.
37 - If C(cluster_name) is not given, this parameter is required.
38 type: str
39 state:
40 description:
41 - Desired state of service.
42 - "State value 'start' and 'present' has same effect."
43 - "State value 'stop' and 'absent' has same effect."
44 choices: [ absent, present, restart, start, stop ]
45 type: str
46 default: 'start'
47 service_policy:
48 description:
49 - Set of valid service policy strings.
50 - If set C(on), then service should be started when the host starts up.
51 - If set C(automatic), then service should run if and only if it has open firewall ports.
52 - If set C(off), then Service should not be started when the host starts up.
53 choices: [ 'automatic', 'off', 'on' ]
54 type: str
55 service_name:
56 description:
57 - Name of Service to be managed. This is a brief identifier for the service, for example, ntpd, vxsyslogd etc.
58 - This value should be a valid ESXi service name.
59 required: True
60 type: str
61 extends_documentation_fragment:
62 - community.vmware.vmware.documentation
63
64 '''
65
66 EXAMPLES = r'''
67 - name: Start ntpd service setting for all ESXi Host in given Cluster
68 community.vmware.vmware_host_service_manager:
69 hostname: '{{ vcenter_hostname }}'
70 username: '{{ vcenter_username }}'
71 password: '{{ vcenter_password }}'
72 cluster_name: '{{ cluster_name }}'
73 service_name: ntpd
74 state: present
75 delegate_to: localhost
76
77 - name: Start ntpd setting for an ESXi Host
78 community.vmware.vmware_host_service_manager:
79 hostname: '{{ vcenter_hostname }}'
80 username: '{{ vcenter_username }}'
81 password: '{{ vcenter_password }}'
82 esxi_hostname: '{{ esxi_hostname }}'
83 service_name: ntpd
84 state: present
85 delegate_to: localhost
86
87 - name: Start ntpd setting for an ESXi Host with Service policy
88 community.vmware.vmware_host_service_manager:
89 hostname: '{{ vcenter_hostname }}'
90 username: '{{ vcenter_username }}'
91 password: '{{ vcenter_password }}'
92 esxi_hostname: '{{ esxi_hostname }}'
93 service_name: ntpd
94 service_policy: on
95 state: present
96 delegate_to: localhost
97
98 - name: Stop ntpd setting for an ESXi Host
99 community.vmware.vmware_host_service_manager:
100 hostname: '{{ vcenter_hostname }}'
101 username: '{{ vcenter_username }}'
102 password: '{{ vcenter_password }}'
103 esxi_hostname: '{{ esxi_hostname }}'
104 service_name: ntpd
105 state: absent
106 delegate_to: localhost
107 '''
108
109 RETURN = r'''#
110 '''
111
112 try:
113 from pyVmomi import vim, vmodl
114 except ImportError:
115 pass
116
117 from ansible.module_utils.basic import AnsibleModule
118 from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
119 from ansible.module_utils._text import to_native
120
121
122 class VmwareServiceManager(PyVmomi):
123 def __init__(self, module):
124 super(VmwareServiceManager, self).__init__(module)
125 cluster_name = self.params.get('cluster_name', None)
126 esxi_host_name = self.params.get('esxi_hostname', None)
127 self.options = self.params.get('options', dict())
128 self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
129 self.desired_state = self.params.get('state')
130 self.desired_policy = self.params.get('service_policy', None)
131 self.service_name = self.params.get('service_name')
132 self.results = {}
133
134 def service_ctrl(self):
135 changed = False
136 host_service_state = []
137 for host in self.hosts:
138 actual_service_state, actual_service_policy = self.check_service_state(host=host, service_name=self.service_name)
139 host_service_system = host.configManager.serviceSystem
140 if host_service_system:
141 changed_state = False
142 self.results[host.name] = dict(service_name=self.service_name,
143 actual_service_state='running' if actual_service_state else 'stopped',
144 actual_service_policy=actual_service_policy,
145 desired_service_policy=self.desired_policy,
146 desired_service_state=self.desired_state,
147 error='',
148 )
149 try:
150 if self.desired_state in ['start', 'present']:
151 if not actual_service_state:
152 if not self.module.check_mode:
153 host_service_system.StartService(id=self.service_name)
154 changed_state = True
155 elif self.desired_state in ['stop', 'absent']:
156 if actual_service_state:
157 if not self.module.check_mode:
158 host_service_system.StopService(id=self.service_name)
159 changed_state = True
160 elif self.desired_state == 'restart':
161 if not self.module.check_mode:
162 host_service_system.RestartService(id=self.service_name)
163 changed_state = True
164
165 if self.desired_policy:
166 if actual_service_policy != self.desired_policy:
167 if not self.module.check_mode:
168 host_service_system.UpdateServicePolicy(id=self.service_name,
169 policy=self.desired_policy)
170 changed_state = True
171
172 host_service_state.append(changed_state)
173 self.results[host.name].update(changed=changed_state)
174 except (vim.fault.InvalidState, vim.fault.NotFound,
175 vim.fault.HostConfigFault, vmodl.fault.InvalidArgument) as e:
176 self.results[host.name].update(changed=False,
177 error=to_native(e.msg))
178
179 if any(host_service_state):
180 changed = True
181 self.module.exit_json(changed=changed, host_service_status=self.results)
182
183 def check_service_state(self, host, service_name):
184 host_service_system = host.configManager.serviceSystem
185 if host_service_system:
186 services = host_service_system.serviceInfo.service
187 for service in services:
188 if service.key == service_name:
189 return service.running, service.policy
190
191 msg = "Failed to find '%s' service on host system '%s'" % (service_name, host.name)
192 cluster_name = self.params.get('cluster_name', None)
193 if cluster_name:
194 msg += " located on cluster '%s'" % cluster_name
195 msg += ", please check if you have specified a valid ESXi service name."
196 self.module.fail_json(msg=msg)
197
198
199 def main():
200 argument_spec = vmware_argument_spec()
201 argument_spec.update(
202 cluster_name=dict(type='str', required=False),
203 esxi_hostname=dict(type='str', required=False),
204 state=dict(type='str', default='start', choices=['absent', 'present', 'restart', 'start', 'stop']),
205 service_name=dict(type='str', required=True),
206 service_policy=dict(type='str', choices=['automatic', 'off', 'on']),
207 )
208
209 module = AnsibleModule(
210 argument_spec=argument_spec,
211 required_one_of=[
212 ['cluster_name', 'esxi_hostname'],
213 ],
214 supports_check_mode=True
215 )
216
217 vmware_host_service = VmwareServiceManager(module)
218 vmware_host_service.service_ctrl()
219
220
221 if __name__ == "__main__":
222 main()
223
[end of plugins/modules/vmware_host_service_manager.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/plugins/modules/vmware_host_service_manager.py b/plugins/modules/vmware_host_service_manager.py
--- a/plugins/modules/vmware_host_service_manager.py
+++ b/plugins/modules/vmware_host_service_manager.py
@@ -41,7 +41,8 @@
- Desired state of service.
- "State value 'start' and 'present' has same effect."
- "State value 'stop' and 'absent' has same effect."
- choices: [ absent, present, restart, start, stop ]
+ - State value C(unchanged) is added in version 1.14.0 to allow defining startup policy without defining or changing service state.
+ choices: [ absent, present, restart, start, stop, unchanged ]
type: str
default: 'start'
service_policy:
@@ -201,7 +202,7 @@
argument_spec.update(
cluster_name=dict(type='str', required=False),
esxi_hostname=dict(type='str', required=False),
- state=dict(type='str', default='start', choices=['absent', 'present', 'restart', 'start', 'stop']),
+ state=dict(type='str', default='start', choices=['absent', 'present', 'restart', 'start', 'stop', 'unchanged']),
service_name=dict(type='str', required=True),
service_policy=dict(type='str', choices=['automatic', 'off', 'on']),
)
|
{"golden_diff": "diff --git a/plugins/modules/vmware_host_service_manager.py b/plugins/modules/vmware_host_service_manager.py\n--- a/plugins/modules/vmware_host_service_manager.py\n+++ b/plugins/modules/vmware_host_service_manager.py\n@@ -41,7 +41,8 @@\n - Desired state of service.\n - \"State value 'start' and 'present' has same effect.\"\n - \"State value 'stop' and 'absent' has same effect.\"\n- choices: [ absent, present, restart, start, stop ]\n+ - State value C(unchanged) is added in version 1.14.0 to allow defining startup policy without defining or changing service state.\n+ choices: [ absent, present, restart, start, stop, unchanged ]\n type: str\n default: 'start'\n service_policy:\n@@ -201,7 +202,7 @@\n argument_spec.update(\n cluster_name=dict(type='str', required=False),\n esxi_hostname=dict(type='str', required=False),\n- state=dict(type='str', default='start', choices=['absent', 'present', 'restart', 'start', 'stop']),\n+ state=dict(type='str', default='start', choices=['absent', 'present', 'restart', 'start', 'stop', 'unchanged']),\n service_name=dict(type='str', required=True),\n service_policy=dict(type='str', choices=['automatic', 'off', 'on']),\n )\n", "issue": "Change service startup policy with vmware_host_service_manager without defining service state\n<!--- Verify first that your feature was not already discussed on GitHub -->\r\n<!--- Complete *all* sections as described, this form is processed automatically -->\r\n\r\n##### SUMMARY\r\n<!--- Describe the new feature/improvement briefly below -->\r\nWhen setting up the service in vSphere Client, the actions to set startup policy or service state are independent. However, when setting the service startup policy using the `vmware_host_service_manager` module, you have to specify the service state if you don't want to start the service, which is the default behavior.\r\n\r\n##### ISSUE TYPE\r\n- Feature Idea\r\n\r\n##### COMPONENT NAME\r\n<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->\r\n`vmware_host_service_manager` in community.vmware v1.11.0\r\n\r\n##### ADDITIONAL INFORMATION\r\n<!--- Describe how the feature would be used, why it is needed and what it would solve -->\r\nUsing the Ansible module should probably match the vSphere Client management behavior. It is not necessary to know the service state before changing the startup policy. The workaround is to use `vmware_host_service_info` module to first gather the state and then use it (which is kinda complicated way as the output values of the variable `running` from the `vmware_host_service_info` don't match the input values of the variable `state` in `vmware_host_service_manager`).\r\n\r\nThe `state` value of `unchanged` could be added (and set as default?). 
The current default is `start`.\r\n\r\nExample playbook with the changes implemented:\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n- hosts: localhost\r\n gather_facts: false\r\n tasks:\r\n - name: Disabling SSH service\r\n community.vmware.vmware_host_service_manager:\r\n esxi_hostname: <esxi_hostname>\r\n service_name: 'TSM-SSH'\r\n service_policy: 'off'\r\n state: 'unchanged'\r\n```\r\n\r\n<!--- HINT: You can also paste gist.github.com links for larger files -->\r\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: vmware_host_service_manager\nshort_description: Manage services on a given ESXi host\ndescription:\n- This module can be used to manage (start, stop, restart) services on a given ESXi host.\n- If cluster_name is provided, specified service will be managed on all ESXi host belonging to that cluster.\n- If specific esxi_hostname is provided, then specified service will be managed on given ESXi host only.\nauthor:\n- Abhijeet Kasurde (@Akasurde)\nnotes:\n- Tested on vSphere 6.5\nrequirements:\n- python >= 2.6\n- PyVmomi\noptions:\n cluster_name:\n description:\n - Name of the cluster.\n - Service settings are applied to every ESXi host system/s in given cluster.\n - If C(esxi_hostname) is not given, this parameter is required.\n type: str\n esxi_hostname:\n description:\n - ESXi hostname.\n - Service settings are applied to this ESXi host system.\n - If C(cluster_name) is not given, this parameter is required.\n type: str\n state:\n description:\n - Desired state of service.\n - \"State value 'start' and 'present' has same effect.\"\n - \"State value 'stop' and 'absent' has same effect.\"\n choices: [ absent, present, restart, start, stop ]\n type: str\n default: 'start'\n service_policy:\n description:\n - Set of valid service policy strings.\n - If set C(on), then service should be started when the host starts up.\n - If set C(automatic), then service should run if and only if it has open firewall ports.\n - If set C(off), then Service should not be started when the host starts up.\n choices: [ 'automatic', 'off', 'on' ]\n type: str\n service_name:\n description:\n - Name of Service to be managed. 
This is a brief identifier for the service, for example, ntpd, vxsyslogd etc.\n - This value should be a valid ESXi service name.\n required: True\n type: str\nextends_documentation_fragment:\n- community.vmware.vmware.documentation\n\n'''\n\nEXAMPLES = r'''\n- name: Start ntpd service setting for all ESXi Host in given Cluster\n community.vmware.vmware_host_service_manager:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n cluster_name: '{{ cluster_name }}'\n service_name: ntpd\n state: present\n delegate_to: localhost\n\n- name: Start ntpd setting for an ESXi Host\n community.vmware.vmware_host_service_manager:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n esxi_hostname: '{{ esxi_hostname }}'\n service_name: ntpd\n state: present\n delegate_to: localhost\n\n- name: Start ntpd setting for an ESXi Host with Service policy\n community.vmware.vmware_host_service_manager:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n esxi_hostname: '{{ esxi_hostname }}'\n service_name: ntpd\n service_policy: on\n state: present\n delegate_to: localhost\n\n- name: Stop ntpd setting for an ESXi Host\n community.vmware.vmware_host_service_manager:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n esxi_hostname: '{{ esxi_hostname }}'\n service_name: ntpd\n state: absent\n delegate_to: localhost\n'''\n\nRETURN = r'''#\n'''\n\ntry:\n from pyVmomi import vim, vmodl\nexcept ImportError:\n pass\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi\nfrom ansible.module_utils._text import to_native\n\n\nclass VmwareServiceManager(PyVmomi):\n def __init__(self, module):\n super(VmwareServiceManager, self).__init__(module)\n cluster_name = self.params.get('cluster_name', None)\n esxi_host_name = self.params.get('esxi_hostname', None)\n self.options = self.params.get('options', dict())\n self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)\n self.desired_state = self.params.get('state')\n self.desired_policy = self.params.get('service_policy', None)\n self.service_name = self.params.get('service_name')\n self.results = {}\n\n def service_ctrl(self):\n changed = False\n host_service_state = []\n for host in self.hosts:\n actual_service_state, actual_service_policy = self.check_service_state(host=host, service_name=self.service_name)\n host_service_system = host.configManager.serviceSystem\n if host_service_system:\n changed_state = False\n self.results[host.name] = dict(service_name=self.service_name,\n actual_service_state='running' if actual_service_state else 'stopped',\n actual_service_policy=actual_service_policy,\n desired_service_policy=self.desired_policy,\n desired_service_state=self.desired_state,\n error='',\n )\n try:\n if self.desired_state in ['start', 'present']:\n if not actual_service_state:\n if not self.module.check_mode:\n host_service_system.StartService(id=self.service_name)\n changed_state = True\n elif self.desired_state in ['stop', 'absent']:\n if actual_service_state:\n if not self.module.check_mode:\n host_service_system.StopService(id=self.service_name)\n changed_state = True\n elif self.desired_state == 'restart':\n if not self.module.check_mode:\n 
host_service_system.RestartService(id=self.service_name)\n changed_state = True\n\n if self.desired_policy:\n if actual_service_policy != self.desired_policy:\n if not self.module.check_mode:\n host_service_system.UpdateServicePolicy(id=self.service_name,\n policy=self.desired_policy)\n changed_state = True\n\n host_service_state.append(changed_state)\n self.results[host.name].update(changed=changed_state)\n except (vim.fault.InvalidState, vim.fault.NotFound,\n vim.fault.HostConfigFault, vmodl.fault.InvalidArgument) as e:\n self.results[host.name].update(changed=False,\n error=to_native(e.msg))\n\n if any(host_service_state):\n changed = True\n self.module.exit_json(changed=changed, host_service_status=self.results)\n\n def check_service_state(self, host, service_name):\n host_service_system = host.configManager.serviceSystem\n if host_service_system:\n services = host_service_system.serviceInfo.service\n for service in services:\n if service.key == service_name:\n return service.running, service.policy\n\n msg = \"Failed to find '%s' service on host system '%s'\" % (service_name, host.name)\n cluster_name = self.params.get('cluster_name', None)\n if cluster_name:\n msg += \" located on cluster '%s'\" % cluster_name\n msg += \", please check if you have specified a valid ESXi service name.\"\n self.module.fail_json(msg=msg)\n\n\ndef main():\n argument_spec = vmware_argument_spec()\n argument_spec.update(\n cluster_name=dict(type='str', required=False),\n esxi_hostname=dict(type='str', required=False),\n state=dict(type='str', default='start', choices=['absent', 'present', 'restart', 'start', 'stop']),\n service_name=dict(type='str', required=True),\n service_policy=dict(type='str', choices=['automatic', 'off', 'on']),\n )\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n required_one_of=[\n ['cluster_name', 'esxi_hostname'],\n ],\n supports_check_mode=True\n )\n\n vmware_host_service = VmwareServiceManager(module)\n vmware_host_service.service_ctrl()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "plugins/modules/vmware_host_service_manager.py"}]}
| 3,423 | 315 |
gh_patches_debug_8435
|
rasdani/github-patches
|
git_diff
|
akvo__akvo-rsr-2418
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
akvo.rest.fields in from_native AttributeError: 'NoneType' object has no attribute 'name'
http://sentry.support.akvo-ops.org/rsr/live/group/1017/
Bug introduced in 1aa60508350d4477c1f0964db7f3c124d9d1bc01
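A minimal reproduction sketch — assumed, not taken from the Sentry report — of how a null image payload reaches the failing attribute access (the real field checks `basestring` under Python 2; `str` is used here only to keep the sketch runnable):

```python
base64_data = None  # e.g. the client submitted {"photo": null}

if isinstance(base64_data, str):
    data = base64_data          # base64 branch, not taken for None
else:
    data = base64_data          # falls through with data still None
    try:
        _ = data.name           # AttributeError: 'NoneType' object has no attribute 'name'
    except AttributeError as exc:
        print(exc)
```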
</issue>
<code>
[start of akvo/rest/fields.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 import base64
9 import imghdr
10 import six
11 import uuid
12
13 from django.core.files.base import ContentFile
14 from django.utils.encoding import smart_text
15 from django.utils.translation import ugettext_lazy as _
16
17 from rest_framework import serializers
18 from rest_framework.fields import ImageField
19 from sorl.thumbnail import get_thumbnail
20 from sorl.thumbnail.parsers import ThumbnailParseError
21
22
23 class NonNullCharField(serializers.CharField):
24 """ Fix fo CharField so that '' is returned if the field value is None
25 see https://github.com/tomchristie/django-rest-framework/pull/1665
26 """
27 def from_native(self, value):
28 if isinstance(value, six.string_types):
29 return value
30 if value is None:
31 return u''
32 return smart_text(value)
33
34
35 class NonNullURLField(NonNullCharField, serializers.URLField):
36 pass
37
38
39 class Base64ImageField(ImageField):
40 """ A django-rest-framework field for handling image-uploads through raw post data.
41 It uses base64 for en-/decoding the contents of the file.
42 Now also supports thumbnails of different sizes. See to_native() for more info.
43 """
44 ALLOWED_IMAGE_TYPES = (
45 'gif',
46 'jpeg',
47 'jpg',
48 'png',
49 )
50 def from_native(self, base64_data):
51 # Check if this is a base64 string
52 if isinstance(base64_data, basestring):
53 # Try to decode the file. Return validation error if it fails.
54 try:
55 decoded_file = base64.b64decode(base64_data)
56 except TypeError:
57 raise serializers.ValidationError(_(u"Please upload a valid image."))
58
59 # Generate file name:
60 file_name = str(uuid.uuid4())[:12] # 12 characters are more than enough.
61 # Get the file name extension:
62 file_extension = self.get_file_extension(file_name, decoded_file)
63 self.check_file_extension(file_extension)
64 complete_file_name = file_name + "." + file_extension
65 data = ContentFile(decoded_file, name=complete_file_name)
66 else:
67 data = base64_data
68 file_extension = self.get_file_extension(data.name, data.read())
69 self.check_file_extension(file_extension)
70 data.seek(0)
71
72 return super(Base64ImageField, self).from_native(data)
73
74 def to_native(self, value):
75 """
76 :param value: A Base64ImageField object
77 :return: a path to a thumbnail with a predetermined size, the default thumb
78 OR
79 a dict with a number of thumbnails, one of which is the default, the others being generated
80 from the query string parameters, and finally the path to the original image keyed to
81 "original".
82
83 The extended functionality, allowing the generation of one or more thumbnails from the
84 original image is triggered by including "image_thumb_name" in the query string. The value
85 for image_thumb_name is a comma separated list of identifiers for the generated thumbs.
86 The names must not be "default" or "original".
87
88 For each thumb thus specified a size must be supplied as a query param on the form
89 image_thumb_<name>_<dimension>
90 where <name> is the name of the thumb specified as one of the values for image_thumb_name
91 and <dimension> is one of "width, "height" or "max_size". width and height must be an integer
92 specifying that dimension in pixels. The image will be scaled correctly in the other
93 dimension. max_size is width and height concatenated with an "x" and sets the maximum size
94 allowed for the respective dimensions, while still maintaining the correct aspect ratio of
95 the image.
96
97 Example:
98 the querystring
99 ?image_thumb_name=big,small&image_thumb_small_width=90&image_thumb_big_max_size=300x200
100 results in the following dict being returned:
101 {
102 'original': '/full/path/to/original/image.png',
103 'default': '/full/path/to/default/thumbnail/image.png',
104 'small': '/full/path/to/small/thumbnail/image.png',
105 'big': '/full/path/to/big/thumbnail/image.png',
106 }
107 This dict will be converted as appropriate to JSON or XML
108
109 NOTE: This special functionality works best when there is only one image field in a model.
110 If there are more, things will still work (I think), but for each image all thumbs returned
111 will have the same dimensions
112 """
113 def get_thumb(request, name):
114 if name not in [u'original', u'default']:
115 try:
116 width = request.GET.get('image_thumb_{}_width'.format(name))
117 if width:
118 return get_thumbnail(value, '{}'.format(width), quality=99)
119 height = request.GET.get('image_thumb_{}_height'.format(name))
120 if height:
121 return get_thumbnail(value, 'x{}'.format(height), quality=99)
122 # yes this is redundant...code is nearly identical with the width code above
123 # but for clarity of function we keep them separate
124 max_size = request.GET.get('image_thumb_{}_max_size'.format(name))
125 if max_size:
126 return get_thumbnail(value, '{}'.format(max_size), quality=99)
127 except (ThumbnailParseError, IOError):
128 return None
129
130 # no size specification matching the name found; give up
131 return None
132
133 if value:
134 default_width = '191' # width of update images on akvo.org/seeithappen
135 try:
136 default_thumb = get_thumbnail(value, default_width, quality=99)
137 request = self.context['request']
138 except (ThumbnailParseError, IOError, KeyError):
139 return None
140
141 # look for name(s) of thumb(s)
142 image_thumb_name = request.GET.get('image_thumb_name')
143 if image_thumb_name:
144 names = image_thumb_name.split(',')
145 thumbs = {u'original': value.url, u'default': default_thumb.url}
146 for name in names:
147 thumb = get_thumb(request, name)
148 if thumb is not None:
149 thumbs[name] = thumb.url
150 return thumbs
151 return default_thumb.url
152
153 def get_file_extension(self, filename, decoded_file):
154 extension = imghdr.what(filename, decoded_file)
155 extension = "jpg" if extension == "jpeg" else extension
156 return extension
157
158 def check_file_extension(self, file_extension):
159 if file_extension not in self.ALLOWED_IMAGE_TYPES:
160 formats = {'format': ', '.join(self.ALLOWED_IMAGE_TYPES)}
161 raise serializers.ValidationError(
162 _(u"Unknown image type. Only the following types are accepted: %(format)s") % formats
163 )
164
[end of akvo/rest/fields.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/akvo/rest/fields.py b/akvo/rest/fields.py
--- a/akvo/rest/fields.py
+++ b/akvo/rest/fields.py
@@ -48,8 +48,10 @@
'png',
)
def from_native(self, base64_data):
+ if base64_data is None:
+ data = base64_data
# Check if this is a base64 string
- if isinstance(base64_data, basestring):
+ elif isinstance(base64_data, basestring):
# Try to decode the file. Return validation error if it fails.
try:
decoded_file = base64.b64decode(base64_data)
|
{"golden_diff": "diff --git a/akvo/rest/fields.py b/akvo/rest/fields.py\n--- a/akvo/rest/fields.py\n+++ b/akvo/rest/fields.py\n@@ -48,8 +48,10 @@\n 'png',\n )\n def from_native(self, base64_data):\n+ if base64_data is None:\n+ data = base64_data\n # Check if this is a base64 string\n- if isinstance(base64_data, basestring):\n+ elif isinstance(base64_data, basestring):\n # Try to decode the file. Return validation error if it fails.\n try:\n decoded_file = base64.b64decode(base64_data)\n", "issue": "akvo.rest.fields in from_native AttributeError: 'NoneType' object has no attribute 'name'\nhttp://sentry.support.akvo-ops.org/rsr/live/group/1017/\n\nBug introduced in 1aa60508350d4477c1f0964db7f3c124d9d1bc01\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nimport base64\nimport imghdr\nimport six\nimport uuid\n\nfrom django.core.files.base import ContentFile\nfrom django.utils.encoding import smart_text\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom rest_framework import serializers\nfrom rest_framework.fields import ImageField\nfrom sorl.thumbnail import get_thumbnail\nfrom sorl.thumbnail.parsers import ThumbnailParseError\n\n\nclass NonNullCharField(serializers.CharField):\n \"\"\" Fix fo CharField so that '' is returned if the field value is None\n see https://github.com/tomchristie/django-rest-framework/pull/1665\n \"\"\"\n def from_native(self, value):\n if isinstance(value, six.string_types):\n return value\n if value is None:\n return u''\n return smart_text(value)\n\n\nclass NonNullURLField(NonNullCharField, serializers.URLField):\n pass\n\n\nclass Base64ImageField(ImageField):\n \"\"\" A django-rest-framework field for handling image-uploads through raw post data.\n It uses base64 for en-/decoding the contents of the file.\n Now also supports thumbnails of different sizes. See to_native() for more info.\n \"\"\"\n ALLOWED_IMAGE_TYPES = (\n 'gif',\n 'jpeg',\n 'jpg',\n 'png',\n )\n def from_native(self, base64_data):\n # Check if this is a base64 string\n if isinstance(base64_data, basestring):\n # Try to decode the file. 
Return validation error if it fails.\n try:\n decoded_file = base64.b64decode(base64_data)\n except TypeError:\n raise serializers.ValidationError(_(u\"Please upload a valid image.\"))\n\n # Generate file name:\n file_name = str(uuid.uuid4())[:12] # 12 characters are more than enough.\n # Get the file name extension:\n file_extension = self.get_file_extension(file_name, decoded_file)\n self.check_file_extension(file_extension)\n complete_file_name = file_name + \".\" + file_extension\n data = ContentFile(decoded_file, name=complete_file_name)\n else:\n data = base64_data\n file_extension = self.get_file_extension(data.name, data.read())\n self.check_file_extension(file_extension)\n data.seek(0)\n\n return super(Base64ImageField, self).from_native(data)\n\n def to_native(self, value):\n \"\"\"\n :param value: A Base64ImageField object\n :return: a path to a thumbnail with a predetermined size, the default thumb\n OR\n a dict with a number of thumbnails, one of which is the default, the others being generated\n from the query string parameters, and finally the path to the original image keyed to\n \"original\".\n\n The extended functionality, allowing the generation of one or more thumbnails from the\n original image is triggered by including \"image_thumb_name\" in the query string. The value\n for image_thumb_name is a comma separated list of identifiers for the generated thumbs.\n The names must not be \"default\" or \"original\".\n\n For each thumb thus specified a size must be supplied as a query param on the form\n image_thumb_<name>_<dimension>\n where <name> is the name of the thumb specified as one of the values for image_thumb_name\n and <dimension> is one of \"width, \"height\" or \"max_size\". width and height must be an integer\n specifying that dimension in pixels. The image will be scaled correctly in the other\n dimension. 
max_size is width and height concatenated with an \"x\" and sets the maximum size\n allowed for the respective dimensions, while still maintaining the correct aspect ratio of\n the image.\n\n Example:\n the querystring\n ?image_thumb_name=big,small&image_thumb_small_width=90&image_thumb_big_max_size=300x200\n results in the following dict being returned:\n {\n 'original': '/full/path/to/original/image.png',\n 'default': '/full/path/to/default/thumbnail/image.png',\n 'small': '/full/path/to/small/thumbnail/image.png',\n 'big': '/full/path/to/big/thumbnail/image.png',\n }\n This dict will be converted as appropriate to JSON or XML\n\n NOTE: This special functionality works best when there is only one image field in a model.\n If there are more, things will still work (I think), but for each image all thumbs returned\n will have the same dimensions\n \"\"\"\n def get_thumb(request, name):\n if name not in [u'original', u'default']:\n try:\n width = request.GET.get('image_thumb_{}_width'.format(name))\n if width:\n return get_thumbnail(value, '{}'.format(width), quality=99)\n height = request.GET.get('image_thumb_{}_height'.format(name))\n if height:\n return get_thumbnail(value, 'x{}'.format(height), quality=99)\n # yes this is redundant...code is nearly identical with the width code above\n # but for clarity of function we keep them separate\n max_size = request.GET.get('image_thumb_{}_max_size'.format(name))\n if max_size:\n return get_thumbnail(value, '{}'.format(max_size), quality=99)\n except (ThumbnailParseError, IOError):\n return None\n\n # no size specification matching the name found; give up\n return None\n\n if value:\n default_width = '191' # width of update images on akvo.org/seeithappen\n try:\n default_thumb = get_thumbnail(value, default_width, quality=99)\n request = self.context['request']\n except (ThumbnailParseError, IOError, KeyError):\n return None\n\n # look for name(s) of thumb(s)\n image_thumb_name = request.GET.get('image_thumb_name')\n if image_thumb_name:\n names = image_thumb_name.split(',')\n thumbs = {u'original': value.url, u'default': default_thumb.url}\n for name in names:\n thumb = get_thumb(request, name)\n if thumb is not None:\n thumbs[name] = thumb.url\n return thumbs\n return default_thumb.url\n\n def get_file_extension(self, filename, decoded_file):\n extension = imghdr.what(filename, decoded_file)\n extension = \"jpg\" if extension == \"jpeg\" else extension\n return extension\n\n def check_file_extension(self, file_extension):\n if file_extension not in self.ALLOWED_IMAGE_TYPES:\n formats = {'format': ', '.join(self.ALLOWED_IMAGE_TYPES)}\n raise serializers.ValidationError(\n _(u\"Unknown image type. Only the following types are accepted: %(format)s\") % formats\n )\n", "path": "akvo/rest/fields.py"}]}
| 2,518 | 159 |
gh_patches_debug_13891
|
rasdani/github-patches
|
git_diff
|
Mailu__Mailu-1862
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Radicale password check fails (length limited?)
So far, the WebDAV endpoint (Radicale) is protected with basic auth (apparently htaccess or similar).
If the user password is longer than 8 characters, the connection fails.
We should remove this limitation so that strong passwords can be used.
Radicale password check fails (length limited?)
So far, the WebDAV endpoint (Radicale) is protected with basic auth (apparently htaccess or similar).
If the user password is longer than 8 characters, the connection fails.
We should remove this limitation so that strong passwords can be used.
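For context, a minimal sketch of the Basic-auth decoding path used by the endpoint shown below. The credentials are made up, and whether this is exactly what the reporter hit is an assumption; it only illustrates that a password containing a `:` makes a plain `split(b":")` return more than two parts:

```python
import base64

# Hypothetical credentials; any ':' inside the password breaks a plain split.
header = "Basic " + base64.b64encode(b"user@example.com:pa:ss:word").decode()
encoded = header.replace("Basic ", "")

parts = base64.b64decode(encoded).split(b":")
print(parts)                 # [b'user@example.com', b'pa', b'ss', b'word']

# Limiting the split keeps the full password intact.
user_email, password = base64.b64decode(encoded).split(b":", 1)
print(user_email, password)  # b'user@example.com' b'pa:ss:word'
```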
</issue>
<code>
[start of core/admin/mailu/internal/views/auth.py]
1 from mailu import models, utils
2 from mailu.internal import internal, nginx
3 from flask import current_app as app
4
5 import flask
6 import flask_login
7 import base64
8 import ipaddress
9
10
11 @internal.route("/auth/email")
12 def nginx_authentication():
13 """ Main authentication endpoint for Nginx email server
14 """
15 limiter = utils.limiter.get_limiter(app.config["AUTH_RATELIMIT"], "auth-ip")
16 client_ip = flask.request.headers["Client-Ip"]
17 if not limiter.test(client_ip):
18 response = flask.Response()
19 response.headers['Auth-Status'] = 'Authentication rate limit from one source exceeded'
20 response.headers['Auth-Error-Code'] = '451 4.3.2'
21 if int(flask.request.headers['Auth-Login-Attempt']) < 10:
22 response.headers['Auth-Wait'] = '3'
23 return response
24 headers = nginx.handle_authentication(flask.request.headers)
25 response = flask.Response()
26 for key, value in headers.items():
27 response.headers[key] = str(value)
28 if ("Auth-Status" not in headers) or (headers["Auth-Status"] != "OK"):
29 limit_subnet = str(app.config["AUTH_RATELIMIT_SUBNET"]) != 'False'
30 subnet = ipaddress.ip_network(app.config["SUBNET"])
31 if limit_subnet or ipaddress.ip_address(client_ip) not in subnet:
32 limiter.hit(flask.request.headers["Client-Ip"])
33 return response
34
35
36 @internal.route("/auth/admin")
37 def admin_authentication():
38 """ Fails if the user is not an authenticated admin.
39 """
40 if (not flask_login.current_user.is_anonymous
41 and flask_login.current_user.global_admin
42 and flask_login.current_user.enabled):
43 return ""
44 return flask.abort(403)
45
46 @internal.route("/auth/user")
47 def user_authentication():
48 """ Fails if the user is not authenticated.
49 """
50 if (not flask_login.current_user.is_anonymous
51 and flask_login.current_user.enabled):
52 response = flask.Response()
53 response.headers["X-User"] = flask_login.current_user.get_id()
54 response.headers["X-User-Token"] = models.User.get_temp_token(flask_login.current_user.get_id())
55 return response
56 return flask.abort(403)
57
58
59 @internal.route("/auth/basic")
60 def basic_authentication():
61 """ Tries to authenticate using the Authorization header.
62 """
63 authorization = flask.request.headers.get("Authorization")
64 if authorization and authorization.startswith("Basic "):
65 encoded = authorization.replace("Basic ", "")
66 user_email, password = base64.b64decode(encoded).split(b":")
67 user = models.User.query.get(user_email.decode("utf8"))
68 if nginx.check_credentials(user, password.decode('utf-8'), flask.request.remote_addr, "web"):
69 response = flask.Response()
70 response.headers["X-User"] = user.email
71 return response
72 response = flask.Response(status=401)
73 response.headers["WWW-Authenticate"] = 'Basic realm="Login Required"'
74 return response
75
[end of core/admin/mailu/internal/views/auth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/core/admin/mailu/internal/views/auth.py b/core/admin/mailu/internal/views/auth.py
--- a/core/admin/mailu/internal/views/auth.py
+++ b/core/admin/mailu/internal/views/auth.py
@@ -63,7 +63,7 @@
authorization = flask.request.headers.get("Authorization")
if authorization and authorization.startswith("Basic "):
encoded = authorization.replace("Basic ", "")
- user_email, password = base64.b64decode(encoded).split(b":")
+ user_email, password = base64.b64decode(encoded).split(b":", 1)
user = models.User.query.get(user_email.decode("utf8"))
if nginx.check_credentials(user, password.decode('utf-8'), flask.request.remote_addr, "web"):
response = flask.Response()
|
{"golden_diff": "diff --git a/core/admin/mailu/internal/views/auth.py b/core/admin/mailu/internal/views/auth.py\n--- a/core/admin/mailu/internal/views/auth.py\n+++ b/core/admin/mailu/internal/views/auth.py\n@@ -63,7 +63,7 @@\n authorization = flask.request.headers.get(\"Authorization\")\n if authorization and authorization.startswith(\"Basic \"):\n encoded = authorization.replace(\"Basic \", \"\")\n- user_email, password = base64.b64decode(encoded).split(b\":\")\n+ user_email, password = base64.b64decode(encoded).split(b\":\", 1)\n user = models.User.query.get(user_email.decode(\"utf8\"))\n if nginx.check_credentials(user, password.decode('utf-8'), flask.request.remote_addr, \"web\"):\n response = flask.Response()\n", "issue": "Radicale password check fails (length limited?)\nSo far, the webdav radicale is protected with basic auth (apparently htaccess or similar).\r\nIf user password > 8 then it fails to connect. \r\nwe should remove this limitation to let use stong passwords\nRadicale password check fails (length limited?)\nSo far, the webdav radicale is protected with basic auth (apparently htaccess or similar).\r\nIf user password > 8 then it fails to connect. \r\nwe should remove this limitation to let use stong passwords\n", "before_files": [{"content": "from mailu import models, utils\nfrom mailu.internal import internal, nginx\nfrom flask import current_app as app\n\nimport flask\nimport flask_login\nimport base64\nimport ipaddress\n\n\[email protected](\"/auth/email\")\ndef nginx_authentication():\n \"\"\" Main authentication endpoint for Nginx email server\n \"\"\"\n limiter = utils.limiter.get_limiter(app.config[\"AUTH_RATELIMIT\"], \"auth-ip\")\n client_ip = flask.request.headers[\"Client-Ip\"]\n if not limiter.test(client_ip):\n response = flask.Response()\n response.headers['Auth-Status'] = 'Authentication rate limit from one source exceeded'\n response.headers['Auth-Error-Code'] = '451 4.3.2'\n if int(flask.request.headers['Auth-Login-Attempt']) < 10:\n response.headers['Auth-Wait'] = '3'\n return response\n headers = nginx.handle_authentication(flask.request.headers)\n response = flask.Response()\n for key, value in headers.items():\n response.headers[key] = str(value)\n if (\"Auth-Status\" not in headers) or (headers[\"Auth-Status\"] != \"OK\"):\n limit_subnet = str(app.config[\"AUTH_RATELIMIT_SUBNET\"]) != 'False'\n subnet = ipaddress.ip_network(app.config[\"SUBNET\"])\n if limit_subnet or ipaddress.ip_address(client_ip) not in subnet:\n limiter.hit(flask.request.headers[\"Client-Ip\"])\n return response\n\n\[email protected](\"/auth/admin\")\ndef admin_authentication():\n \"\"\" Fails if the user is not an authenticated admin.\n \"\"\"\n if (not flask_login.current_user.is_anonymous\n and flask_login.current_user.global_admin\n and flask_login.current_user.enabled):\n return \"\"\n return flask.abort(403)\n\[email protected](\"/auth/user\")\ndef user_authentication():\n \"\"\" Fails if the user is not authenticated.\n \"\"\"\n if (not flask_login.current_user.is_anonymous\n and flask_login.current_user.enabled):\n response = flask.Response()\n response.headers[\"X-User\"] = flask_login.current_user.get_id()\n response.headers[\"X-User-Token\"] = models.User.get_temp_token(flask_login.current_user.get_id())\n return response\n return flask.abort(403)\n\n\[email protected](\"/auth/basic\")\ndef basic_authentication():\n \"\"\" Tries to authenticate using the Authorization header.\n \"\"\"\n authorization = flask.request.headers.get(\"Authorization\")\n if authorization and 
authorization.startswith(\"Basic \"):\n encoded = authorization.replace(\"Basic \", \"\")\n user_email, password = base64.b64decode(encoded).split(b\":\")\n user = models.User.query.get(user_email.decode(\"utf8\"))\n if nginx.check_credentials(user, password.decode('utf-8'), flask.request.remote_addr, \"web\"):\n response = flask.Response()\n response.headers[\"X-User\"] = user.email\n return response\n response = flask.Response(status=401)\n response.headers[\"WWW-Authenticate\"] = 'Basic realm=\"Login Required\"'\n return response\n", "path": "core/admin/mailu/internal/views/auth.py"}]}
| 1,447 | 170 |
gh_patches_debug_5475
|
rasdani/github-patches
|
git_diff
|
mesonbuild__meson-8069
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
i18n module does not seem to respect MESON_INSTALL_QUIET environment variable
**Describe the bug**
When using `meson install --quiet`, there's still output generated by what I presume is gettext:
```
Installing /root/build/po/be.gmo to /root/dest/usr/share/locale/be/LC_MESSAGES/systemd.mo
Installing /root/build/po/[email protected] to /root/dest/usr/share/locale/be@latin/LC_MESSAGES/systemd.mo
Installing /root/build/po/bg.gmo to /root/dest/usr/share/locale/bg/LC_MESSAGES/systemd.mo
Installing /root/build/po/ca.gmo to /root/dest/usr/share/locale/ca/LC_MESSAGES/systemd.mo
Installing /root/build/po/cs.gmo to /root/dest/usr/share/locale/cs/LC_MESSAGES/systemd.mo
Installing /root/build/po/da.gmo to /root/dest/usr/share/locale/da/LC_MESSAGES/systemd.mo
Installing /root/build/po/de.gmo to /root/dest/usr/share/locale/de/LC_MESSAGES/systemd.mo
Installing /root/build/po/el.gmo to /root/dest/usr/share/locale/el/LC_MESSAGES/systemd.mo
Installing /root/build/po/es.gmo to /root/dest/usr/share/locale/es/LC_MESSAGES/systemd.mo
Installing /root/build/po/fr.gmo to /root/dest/usr/share/locale/fr/LC_MESSAGES/systemd.mo
Installing /root/build/po/gl.gmo to /root/dest/usr/share/locale/gl/LC_MESSAGES/systemd.mo
Installing /root/build/po/hr.gmo to /root/dest/usr/share/locale/hr/LC_MESSAGES/systemd.mo
Installing /root/build/po/hu.gmo to /root/dest/usr/share/locale/hu/LC_MESSAGES/systemd.mo
Installing /root/build/po/id.gmo to /root/dest/usr/share/locale/id/LC_MESSAGES/systemd.mo
Installing /root/build/po/it.gmo to /root/dest/usr/share/locale/it/LC_MESSAGES/systemd.mo
Installing /root/build/po/ja.gmo to /root/dest/usr/share/locale/ja/LC_MESSAGES/systemd.mo
Installing /root/build/po/ko.gmo to /root/dest/usr/share/locale/ko/LC_MESSAGES/systemd.mo
Installing /root/build/po/lt.gmo to /root/dest/usr/share/locale/lt/LC_MESSAGES/systemd.mo
Installing /root/build/po/pl.gmo to /root/dest/usr/share/locale/pl/LC_MESSAGES/systemd.mo
Installing /root/build/po/pt_BR.gmo to /root/dest/usr/share/locale/pt_BR/LC_MESSAGES/systemd.mo
Installing /root/build/po/ro.gmo to /root/dest/usr/share/locale/ro/LC_MESSAGES/systemd.mo
Installing /root/build/po/ru.gmo to /root/dest/usr/share/locale/ru/LC_MESSAGES/systemd.mo
Installing /root/build/po/sk.gmo to /root/dest/usr/share/locale/sk/LC_MESSAGES/systemd.mo
Installing /root/build/po/sr.gmo to /root/dest/usr/share/locale/sr/LC_MESSAGES/systemd.mo
Installing /root/build/po/sv.gmo to /root/dest/usr/share/locale/sv/LC_MESSAGES/systemd.mo
Installing /root/build/po/tr.gmo to /root/dest/usr/share/locale/tr/LC_MESSAGES/systemd.mo
Installing /root/build/po/uk.gmo to /root/dest/usr/share/locale/uk/LC_MESSAGES/systemd.mo
Installing /root/build/po/zh_CN.gmo to /root/dest/usr/share/locale/zh_CN/LC_MESSAGES/systemd.mo
Installing /root/build/po/zh_TW.gmo to /root/dest/usr/share/locale/zh_TW/LC_MESSAGES/systemd.mo
```
**To Reproduce**
```
git clone https://github.com/systemd/systemd.git
meson build
meson compile -C build
meson install -C build --quiet
```
**Expected behavior**
No output from gettext when --quiet is passed to `meson install`
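A minimal sketch — not the actual Meson source — of how an install-time helper could gate its output; the assumption is that `meson install --quiet` exports `MESON_INSTALL_QUIET` to the scripts it runs:

```python
import os

def log_install(srcfile, outfile):
    # Only print when the quiet flag is absent from the environment.
    if not os.environ.get('MESON_INSTALL_QUIET'):
        print('Installing %s to %s' % (srcfile, outfile))

os.environ['MESON_INSTALL_QUIET'] = '1'
log_install('po/de.gmo', '/root/dest/usr/share/locale/de/LC_MESSAGES/systemd.mo')  # prints nothing
```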
**system parameters**
Should not be relevant to this bug; let me know if I should add them. I'm running the latest meson (0.56.0).
</issue>
<code>
[start of mesonbuild/scripts/gettext.py]
1 # Copyright 2016 The Meson development team
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import shutil
17 import argparse
18 import subprocess
19 from . import destdir_join
20 import typing as T
21
22 parser = argparse.ArgumentParser()
23 parser.add_argument('command')
24 parser.add_argument('--pkgname', default='')
25 parser.add_argument('--datadirs', default='')
26 parser.add_argument('--langs', default='')
27 parser.add_argument('--localedir', default='')
28 parser.add_argument('--subdir', default='')
29 parser.add_argument('--extra-args', default='')
30
31 def read_linguas(src_sub: str) -> T.List[str]:
32 # Syntax of this file is documented here:
33 # https://www.gnu.org/software/gettext/manual/html_node/po_002fLINGUAS.html
34 linguas = os.path.join(src_sub, 'LINGUAS')
35 try:
36 langs = []
37 with open(linguas) as f:
38 for line in f:
39 line = line.strip()
40 if line and not line.startswith('#'):
41 langs += line.split()
42 return langs
43 except (FileNotFoundError, PermissionError):
44 print('Could not find file LINGUAS in {}'.format(src_sub))
45 return []
46
47 def run_potgen(src_sub: str, pkgname: str, datadirs: str, args: T.List[str]) -> int:
48 listfile = os.path.join(src_sub, 'POTFILES.in')
49 if not os.path.exists(listfile):
50 listfile = os.path.join(src_sub, 'POTFILES')
51 if not os.path.exists(listfile):
52 print('Could not find file POTFILES in %s' % src_sub)
53 return 1
54
55 child_env = os.environ.copy()
56 if datadirs:
57 child_env['GETTEXTDATADIRS'] = datadirs
58
59 ofile = os.path.join(src_sub, pkgname + '.pot')
60 return subprocess.call(['xgettext', '--package-name=' + pkgname, '-p', src_sub, '-f', listfile,
61 '-D', os.environ['MESON_SOURCE_ROOT'], '-k_', '-o', ofile] + args,
62 env=child_env)
63
64 def gen_gmo(src_sub: str, bld_sub: str, langs: T.List[str]) -> int:
65 for l in langs:
66 subprocess.check_call(['msgfmt', os.path.join(src_sub, l + '.po'),
67 '-o', os.path.join(bld_sub, l + '.gmo')])
68 return 0
69
70 def update_po(src_sub: str, pkgname: str, langs: T.List[str]) -> int:
71 potfile = os.path.join(src_sub, pkgname + '.pot')
72 for l in langs:
73 pofile = os.path.join(src_sub, l + '.po')
74 if os.path.exists(pofile):
75 subprocess.check_call(['msgmerge', '-q', '-o', pofile, pofile, potfile])
76 else:
77 subprocess.check_call(['msginit', '--input', potfile, '--output-file', pofile, '--locale', l, '--no-translator'])
78 return 0
79
80 def do_install(src_sub: str, bld_sub: str, dest: str, pkgname: str, langs: T.List[str]) -> int:
81 for l in langs:
82 srcfile = os.path.join(bld_sub, l + '.gmo')
83 outfile = os.path.join(dest, l, 'LC_MESSAGES',
84 pkgname + '.mo')
85 tempfile = outfile + '.tmp'
86 os.makedirs(os.path.dirname(outfile), exist_ok=True)
87 shutil.copy2(srcfile, tempfile)
88 os.replace(tempfile, outfile)
89 print('Installing %s to %s' % (srcfile, outfile))
90 return 0
91
92 def run(args: T.List[str]) -> int:
93 options = parser.parse_args(args)
94 subcmd = options.command
95 langs = options.langs.split('@@') if options.langs else None
96 extra_args = options.extra_args.split('@@') if options.extra_args else []
97 subdir = os.environ.get('MESON_SUBDIR', '')
98 if options.subdir:
99 subdir = options.subdir
100 src_sub = os.path.join(os.environ['MESON_SOURCE_ROOT'], subdir)
101 bld_sub = os.path.join(os.environ['MESON_BUILD_ROOT'], subdir)
102
103 if not langs:
104 langs = read_linguas(src_sub)
105
106 if subcmd == 'pot':
107 return run_potgen(src_sub, options.pkgname, options.datadirs, extra_args)
108 elif subcmd == 'gen_gmo':
109 return gen_gmo(src_sub, bld_sub, langs)
110 elif subcmd == 'update_po':
111 if run_potgen(src_sub, options.pkgname, options.datadirs, extra_args) != 0:
112 return 1
113 return update_po(src_sub, options.pkgname, langs)
114 elif subcmd == 'install':
115 destdir = os.environ.get('DESTDIR', '')
116 dest = destdir_join(destdir, os.path.join(os.environ['MESON_INSTALL_PREFIX'],
117 options.localedir))
118 if gen_gmo(src_sub, bld_sub, langs) != 0:
119 return 1
120 do_install(src_sub, bld_sub, dest, options.pkgname, langs)
121 else:
122 print('Unknown subcommand.')
123 return 1
124 return 0
125
[end of mesonbuild/scripts/gettext.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mesonbuild/scripts/gettext.py b/mesonbuild/scripts/gettext.py
--- a/mesonbuild/scripts/gettext.py
+++ b/mesonbuild/scripts/gettext.py
@@ -86,7 +86,8 @@
os.makedirs(os.path.dirname(outfile), exist_ok=True)
shutil.copy2(srcfile, tempfile)
os.replace(tempfile, outfile)
- print('Installing %s to %s' % (srcfile, outfile))
+ if not os.getenv('MESON_INSTALL_QUIET', False):
+ print('Installing %s to %s' % (srcfile, outfile))
return 0
def run(args: T.List[str]) -> int:
|
{"golden_diff": "diff --git a/mesonbuild/scripts/gettext.py b/mesonbuild/scripts/gettext.py\n--- a/mesonbuild/scripts/gettext.py\n+++ b/mesonbuild/scripts/gettext.py\n@@ -86,7 +86,8 @@\n os.makedirs(os.path.dirname(outfile), exist_ok=True)\n shutil.copy2(srcfile, tempfile)\n os.replace(tempfile, outfile)\n- print('Installing %s to %s' % (srcfile, outfile))\n+ if not os.getenv('MESON_INSTALL_QUIET', False):\n+ print('Installing %s to %s' % (srcfile, outfile))\n return 0\n \n def run(args: T.List[str]) -> int:\n", "issue": "i18n module does not seem to respect MESON_INSTALL_QUIET environment variable\n**Describe the bug**\r\n\r\nWhen using `meson install --quiet`, there's still output generated by what I presume is gettext:\r\n\r\n```\r\nInstalling /root/build/po/be.gmo to /root/dest/usr/share/locale/be/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/[email protected] to /root/dest/usr/share/locale/be@latin/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/bg.gmo to /root/dest/usr/share/locale/bg/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/ca.gmo to /root/dest/usr/share/locale/ca/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/cs.gmo to /root/dest/usr/share/locale/cs/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/da.gmo to /root/dest/usr/share/locale/da/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/de.gmo to /root/dest/usr/share/locale/de/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/el.gmo to /root/dest/usr/share/locale/el/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/es.gmo to /root/dest/usr/share/locale/es/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/fr.gmo to /root/dest/usr/share/locale/fr/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/gl.gmo to /root/dest/usr/share/locale/gl/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/hr.gmo to /root/dest/usr/share/locale/hr/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/hu.gmo to /root/dest/usr/share/locale/hu/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/id.gmo to /root/dest/usr/share/locale/id/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/it.gmo to /root/dest/usr/share/locale/it/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/ja.gmo to /root/dest/usr/share/locale/ja/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/ko.gmo to /root/dest/usr/share/locale/ko/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/lt.gmo to /root/dest/usr/share/locale/lt/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/pl.gmo to /root/dest/usr/share/locale/pl/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/pt_BR.gmo to /root/dest/usr/share/locale/pt_BR/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/ro.gmo to /root/dest/usr/share/locale/ro/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/ru.gmo to /root/dest/usr/share/locale/ru/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/sk.gmo to /root/dest/usr/share/locale/sk/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/sr.gmo to /root/dest/usr/share/locale/sr/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/sv.gmo to /root/dest/usr/share/locale/sv/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/tr.gmo to /root/dest/usr/share/locale/tr/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/uk.gmo to /root/dest/usr/share/locale/uk/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/zh_CN.gmo to /root/dest/usr/share/locale/zh_CN/LC_MESSAGES/systemd.mo\r\nInstalling /root/build/po/zh_TW.gmo to /root/dest/usr/share/locale/zh_TW/LC_MESSAGES/systemd.mo\r\n```\r\n\r\n**To Reproduce**\r\n\r\n```\r\ngit clone 
https://github.com/systemd/systemd.git\r\nmeson build\r\nmeson compile -C build\r\nmeson install -C build --quiet\r\n```\r\n\r\n**Expected behavior**\r\n\r\nNo output from gettext when --quiet is passed to `meson install`\r\n\r\n**system parameters**\r\n\r\nShould not be relevant to this bug. Let me know if I should add them. I'm running latest meson (0.56.0)\r\n\n", "before_files": [{"content": "# Copyright 2016 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport shutil\nimport argparse\nimport subprocess\nfrom . import destdir_join\nimport typing as T\n\nparser = argparse.ArgumentParser()\nparser.add_argument('command')\nparser.add_argument('--pkgname', default='')\nparser.add_argument('--datadirs', default='')\nparser.add_argument('--langs', default='')\nparser.add_argument('--localedir', default='')\nparser.add_argument('--subdir', default='')\nparser.add_argument('--extra-args', default='')\n\ndef read_linguas(src_sub: str) -> T.List[str]:\n # Syntax of this file is documented here:\n # https://www.gnu.org/software/gettext/manual/html_node/po_002fLINGUAS.html\n linguas = os.path.join(src_sub, 'LINGUAS')\n try:\n langs = []\n with open(linguas) as f:\n for line in f:\n line = line.strip()\n if line and not line.startswith('#'):\n langs += line.split()\n return langs\n except (FileNotFoundError, PermissionError):\n print('Could not find file LINGUAS in {}'.format(src_sub))\n return []\n\ndef run_potgen(src_sub: str, pkgname: str, datadirs: str, args: T.List[str]) -> int:\n listfile = os.path.join(src_sub, 'POTFILES.in')\n if not os.path.exists(listfile):\n listfile = os.path.join(src_sub, 'POTFILES')\n if not os.path.exists(listfile):\n print('Could not find file POTFILES in %s' % src_sub)\n return 1\n\n child_env = os.environ.copy()\n if datadirs:\n child_env['GETTEXTDATADIRS'] = datadirs\n\n ofile = os.path.join(src_sub, pkgname + '.pot')\n return subprocess.call(['xgettext', '--package-name=' + pkgname, '-p', src_sub, '-f', listfile,\n '-D', os.environ['MESON_SOURCE_ROOT'], '-k_', '-o', ofile] + args,\n env=child_env)\n\ndef gen_gmo(src_sub: str, bld_sub: str, langs: T.List[str]) -> int:\n for l in langs:\n subprocess.check_call(['msgfmt', os.path.join(src_sub, l + '.po'),\n '-o', os.path.join(bld_sub, l + '.gmo')])\n return 0\n\ndef update_po(src_sub: str, pkgname: str, langs: T.List[str]) -> int:\n potfile = os.path.join(src_sub, pkgname + '.pot')\n for l in langs:\n pofile = os.path.join(src_sub, l + '.po')\n if os.path.exists(pofile):\n subprocess.check_call(['msgmerge', '-q', '-o', pofile, pofile, potfile])\n else:\n subprocess.check_call(['msginit', '--input', potfile, '--output-file', pofile, '--locale', l, '--no-translator'])\n return 0\n\ndef do_install(src_sub: str, bld_sub: str, dest: str, pkgname: str, langs: T.List[str]) -> int:\n for l in langs:\n srcfile = os.path.join(bld_sub, l + '.gmo')\n outfile = os.path.join(dest, l, 'LC_MESSAGES',\n pkgname + '.mo')\n tempfile = outfile + '.tmp'\n os.makedirs(os.path.dirname(outfile), 
exist_ok=True)\n shutil.copy2(srcfile, tempfile)\n os.replace(tempfile, outfile)\n print('Installing %s to %s' % (srcfile, outfile))\n return 0\n\ndef run(args: T.List[str]) -> int:\n options = parser.parse_args(args)\n subcmd = options.command\n langs = options.langs.split('@@') if options.langs else None\n extra_args = options.extra_args.split('@@') if options.extra_args else []\n subdir = os.environ.get('MESON_SUBDIR', '')\n if options.subdir:\n subdir = options.subdir\n src_sub = os.path.join(os.environ['MESON_SOURCE_ROOT'], subdir)\n bld_sub = os.path.join(os.environ['MESON_BUILD_ROOT'], subdir)\n\n if not langs:\n langs = read_linguas(src_sub)\n\n if subcmd == 'pot':\n return run_potgen(src_sub, options.pkgname, options.datadirs, extra_args)\n elif subcmd == 'gen_gmo':\n return gen_gmo(src_sub, bld_sub, langs)\n elif subcmd == 'update_po':\n if run_potgen(src_sub, options.pkgname, options.datadirs, extra_args) != 0:\n return 1\n return update_po(src_sub, options.pkgname, langs)\n elif subcmd == 'install':\n destdir = os.environ.get('DESTDIR', '')\n dest = destdir_join(destdir, os.path.join(os.environ['MESON_INSTALL_PREFIX'],\n options.localedir))\n if gen_gmo(src_sub, bld_sub, langs) != 0:\n return 1\n do_install(src_sub, bld_sub, dest, options.pkgname, langs)\n else:\n print('Unknown subcommand.')\n return 1\n return 0\n", "path": "mesonbuild/scripts/gettext.py"}]}
| 2,988 | 149 |
gh_patches_debug_2730
|
rasdani/github-patches
|
git_diff
|
microsoft__Qcodes-940
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error when saving to a drive other than the current path
This is due to Windows' handling of drive letters. A minimal example:
``` python
import qcodes,os
datadir = r'd:\Temp'
qcodes.DataSet.default_io = qcodes.DiskIO(datadir)
p=qcodes.Parameter('p', set_cmd=None)
q=qcodes.Parameter('q', set_cmd=None)
ds=qcodes.Loop(p[0:10:1]).each(q).run() # fine
qcodes.DataSet.default_io = qcodes.DiskIO(r'c:\Temp')
ds=qcodes.Loop(p[0:10:1]).each(p).run() # error
```
This generates the error `ValueError: path is on mount 'd:', start on mount 'c:'`
Also see https://bugs.python.org/issue7195
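
For reference, the underlying failure can be reproduced with `os.path.relpath` alone, which is what `DiskIO.to_location` uses; a minimal sketch (assuming a Windows machine where `c:` and `d:` are separate drives):

```python
import os

# relpath cannot express a relative path across Windows drive letters,
# so it raises instead of returning a location string.
os.path.relpath(r"d:\Temp\data.dat", start=r"c:\Temp")
# ValueError: path is on mount 'd:', start on mount 'c:'
```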
</issue>
<code>
[start of qcodes/data/io.py]
1 """
2 IO managers for QCodes.
3
4 IO managers wrap whatever physical storage layer the user wants to use
5 in an interface mimicking the built-in <open> context manager, with
6 some restrictions to minimize the overhead in creating new IO managers.
7
8 The main thing these managers need to implement is the open context manager:
9
10 - Only the context manager needs to be implemented, not separate
11 open function and close methods.
12
13 - open takes the standard parameters:
14
15 - filename: (string)
16 - mode: (string) only 'r' (read), 'w' (write), and 'a' (append) are
17 expected to be implemented. As with normal file objects, the only
18 difference between write and append is that write empties the file
19 before adding new data, and append leaves the existing contents in
20 place but starts writing at the end.
21 - encoding: If a special output encoding is desired. i.e. 'utf8
22
23 - the file-like object returned should implement a minimal set of operations.
24
25 In read mode:
26 - read([size]): read to the end or at most size bytes into a string
27 - readline([size]): read until a newline or up to size bytes, into a string
28 - iter(): usually return self, but can be any iterator over lines
29 - next(): assuming iter() returns self, this yields the next line.
30
31 In write or append mode:
32 - write(s): add string s to the end of the file.
33 - writelines(seq): add a sequence of strings
34
35 IO managers should also implement:
36
37 - a join method, ala os.path.join(\*args).
38 - a list method, that returns all objects matching location
39 - a remove method, ala os.remove(path) except that it will remove directories
40 as well as files, since we're allowing "locations" to be directories
41 or files.
42 """
43
44 from contextlib import contextmanager
45 import os
46 import re
47 import shutil
48 from fnmatch import fnmatch
49
50 ALLOWED_OPEN_MODES = ('r', 'w', 'a')
51
52
53 class DiskIO:
54
55 """
56 Simple IO object to wrap disk operations with a custom base location.
57
58 Also accepts both forward and backward slashes at any point, and
59 normalizes both to the OS we are currently on.
60
61 Args:
62 base_location (str): a path to the root data folder.
63 Converted to an absolute path immediately, so even if you supply a
64 relative path, later changes to the OS working directory will not
65 affect data paths.
66 """
67
68 def __init__(self, base_location):
69 if base_location is None:
70 self.base_location = None
71 else:
72 base_location = self._normalize_slashes(base_location)
73 self.base_location = os.path.abspath(base_location)
74
75 @contextmanager
76 def open(self, filename, mode, encoding=None):
77 """
78 Mimic the interface of the built in open context manager.
79
80 Args:
81 filename (str): path relative to base_location.
82
83 mode (str): 'r' (read), 'w' (write), or 'a' (append).
84 Other open modes are not supported because we don't want
85 to force all IO managers to support others.
86
87 Returns:
88 context manager yielding the open file
89 """
90 if mode not in ALLOWED_OPEN_MODES:
91 raise ValueError('mode {} not allowed in IO managers'.format(mode))
92
93 filepath = self.to_path(filename)
94
95 # make directories if needed
96 dirpath = os.path.dirname(filepath)
97 if not os.path.exists(dirpath):
98 os.makedirs(dirpath)
99
100 # normally we'd construct this context manager with try/finally, but
101 # here we already have a context manager for open so we just wrap it
102 with open(filepath, mode, encoding=encoding) as f:
103 yield f
104
105 def _normalize_slashes(self, location):
106 # note that this is NOT os.path.join - the difference is os.path.join
107 # discards empty strings, so if you use it on a re.split absolute
108 # path you will get a relative path!
109 return os.sep.join(re.split('[\\\\/]', location))
110
111 def to_path(self, location):
112 """
113 Convert a location string into a path on the local file system.
114
115 For DiskIO this just fixes slashes and prepends the base location,
116 doing nothing active with the file. But for other io managers that
117 refer to remote storage, this method may actually fetch the file and
118 put it at a temporary local path.
119
120 Args:
121 location (str): A location string for a complete dataset or
122 a file within it.
123
124 Returns:
125 path (str): The path on disk to which this location maps.
126 """
127 location = self._normalize_slashes(location)
128 if self.base_location:
129 return os.path.join(self.base_location, location)
130 else:
131 return location
132
133 def to_location(self, path):
134 """
135 Convert a local filesystem path into a location string.
136
137 Args:
138 path (str): a path on the local file system.
139
140 Returns:
141 location (str): the location string corresponding to this path.
142 """
143 if self.base_location:
144 return os.path.relpath(path, self.base_location)
145 else:
146 return path
147
148 def __repr__(self):
149 """Show the base location in the repr."""
150 return '<DiskIO, base_location={}>'.format(repr(self.base_location))
151
152 def join(self, *args):
153 """Context-dependent os.path.join for this io manager."""
154 return os.path.join(*list(map(self._normalize_slashes, args)))
155
156 def isfile(self, location):
157 """Check whether this location matches a file."""
158 path = self.to_path(location)
159 return os.path.isfile(path)
160
161 def list(self, location, maxdepth=1, include_dirs=False):
162 """
163 Return all files that match location.
164
165 This is either files whose names match up to an arbitrary extension,
166 or any files within an exactly matching directory name.
167
168 Args:
169 location (str): the location to match.
170 May contain the usual path wildcards * and ?
171
172 maxdepth (int, optional): maximum levels of directory nesting to
173 recurse into looking for files. Default 1.
174
175 include_dirs (bool, optional): whether to allow directories in
176 the results or just files. Default False.
177
178 Returns:
179 A list of matching files and/or directories, as locations
180 relative to our base_location.
181 """
182 location = self._normalize_slashes(location)
183 search_dir, pattern = os.path.split(location)
184 path = self.to_path(search_dir)
185
186 if not os.path.isdir(path):
187 return []
188
189 matches = [fn for fn in os.listdir(path) if fnmatch(fn, pattern + '*')]
190 out = []
191
192 for match in matches:
193 matchpath = self.join(path, match)
194 if os.path.isdir(matchpath) and fnmatch(match, pattern):
195 if maxdepth > 0:
196 # exact directory match - walk down to maxdepth
197 for root, dirs, files in os.walk(matchpath, topdown=True):
198 depth = root[len(path):].count(os.path.sep)
199 if depth == maxdepth:
200 dirs[:] = [] # don't recurse any further
201
202 for fn in files + (dirs if include_dirs else []):
203 out.append(self.to_location(self.join(root, fn)))
204
205 elif include_dirs:
206 out.append(self.join(search_dir, match))
207
208 elif (os.path.isfile(matchpath) and
209 (fnmatch(match, pattern) or
210 fnmatch(os.path.splitext(match)[0], pattern))):
211 # exact filename match, or match up to an extension
212 # note that we need fnmatch(match, pattern) in addition to the
213 # splitext test to cover the case of the base filename itself
214 # containing a dot.
215 out.append(self.join(search_dir, match))
216
217 return out
218
219 def remove(self, filename):
220 """Delete a file or folder and prune the directory tree."""
221 path = self.to_path(filename)
222 if os.path.isdir(path):
223 shutil.rmtree(path)
224 else:
225 os.remove(path)
226
227 filepath = os.path.split(path)[0]
228 try:
229 os.removedirs(filepath)
230 except OSError:
231 # directory was not empty - good that we're not removing it!
232 pass
233
234 def remove_all(self, location):
235 """
236 Delete all files/directories in the dataset at this location.
237
238 Afterward prunes the directory tree.
239 """
240 for fn in self.list(location):
241 self.remove(fn)
242
[end of qcodes/data/io.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/qcodes/data/io.py b/qcodes/data/io.py
--- a/qcodes/data/io.py
+++ b/qcodes/data/io.py
@@ -141,7 +141,7 @@
location (str): the location string corresponding to this path.
"""
if self.base_location:
- return os.path.relpath(path, self.base_location)
+ return os.path.join(self.base_location, path)
else:
return path
|
{"golden_diff": "diff --git a/qcodes/data/io.py b/qcodes/data/io.py\n--- a/qcodes/data/io.py\n+++ b/qcodes/data/io.py\n@@ -141,7 +141,7 @@\n location (str): the location string corresponding to this path.\n \"\"\"\n if self.base_location:\n- return os.path.relpath(path, self.base_location)\n+ return os.path.join(self.base_location, path)\n else:\n return path\n", "issue": "error when saving to drive other than current path\nThis is due to windows handling of drives. A minimal example:\r\n``` python\r\nimport qcodes,os\r\n\r\ndatadir = r'd:\\Temp'\r\nqcodes.DataSet.default_io = qcodes.DiskIO(datadir)\r\n\r\np=qcodes.Parameter('p', set_cmd=None)\r\nq=qcodes.Parameter('q', set_cmd=None)\r\nds=qcodes.Loop(p[0:10:1]).each(q).run() # fine\r\n \r\nqcodes.DataSet.default_io = qcodes.DiskIO(r'c:\\Temp')\r\n\r\nds=qcodes.Loop(p[0:10:1]).each(p).run() # error\r\n```\r\nThis generates the error `ValueError: path is on mount 'd:', start on mount 'c:'`\r\n\r\nAlso see https://bugs.python.org/issue7195\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nIO managers for QCodes.\n\nIO managers wrap whatever physical storage layer the user wants to use\nin an interface mimicking the built-in <open> context manager, with\nsome restrictions to minimize the overhead in creating new IO managers.\n\nThe main thing these managers need to implement is the open context manager:\n\n- Only the context manager needs to be implemented, not separate\n open function and close methods.\n\n- open takes the standard parameters:\n\n - filename: (string)\n - mode: (string) only 'r' (read), 'w' (write), and 'a' (append) are\n expected to be implemented. As with normal file objects, the only\n difference between write and append is that write empties the file\n before adding new data, and append leaves the existing contents in\n place but starts writing at the end.\n - encoding: If a special output encoding is desired. i.e. 
'utf8\n\n- the file-like object returned should implement a minimal set of operations.\n\n In read mode:\n - read([size]): read to the end or at most size bytes into a string\n - readline([size]): read until a newline or up to size bytes, into a string\n - iter(): usually return self, but can be any iterator over lines\n - next(): assuming iter() returns self, this yields the next line.\n\n In write or append mode:\n - write(s): add string s to the end of the file.\n - writelines(seq): add a sequence of strings\n\nIO managers should also implement:\n\n- a join method, ala os.path.join(\\*args).\n- a list method, that returns all objects matching location\n- a remove method, ala os.remove(path) except that it will remove directories\n as well as files, since we're allowing \"locations\" to be directories\n or files.\n\"\"\"\n\nfrom contextlib import contextmanager\nimport os\nimport re\nimport shutil\nfrom fnmatch import fnmatch\n\nALLOWED_OPEN_MODES = ('r', 'w', 'a')\n\n\nclass DiskIO:\n\n \"\"\"\n Simple IO object to wrap disk operations with a custom base location.\n\n Also accepts both forward and backward slashes at any point, and\n normalizes both to the OS we are currently on.\n\n Args:\n base_location (str): a path to the root data folder.\n Converted to an absolute path immediately, so even if you supply a\n relative path, later changes to the OS working directory will not\n affect data paths.\n \"\"\"\n\n def __init__(self, base_location):\n if base_location is None:\n self.base_location = None\n else:\n base_location = self._normalize_slashes(base_location)\n self.base_location = os.path.abspath(base_location)\n\n @contextmanager\n def open(self, filename, mode, encoding=None):\n \"\"\"\n Mimic the interface of the built in open context manager.\n\n Args:\n filename (str): path relative to base_location.\n\n mode (str): 'r' (read), 'w' (write), or 'a' (append).\n Other open modes are not supported because we don't want\n to force all IO managers to support others.\n\n Returns:\n context manager yielding the open file\n \"\"\"\n if mode not in ALLOWED_OPEN_MODES:\n raise ValueError('mode {} not allowed in IO managers'.format(mode))\n\n filepath = self.to_path(filename)\n\n # make directories if needed\n dirpath = os.path.dirname(filepath)\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n\n # normally we'd construct this context manager with try/finally, but\n # here we already have a context manager for open so we just wrap it\n with open(filepath, mode, encoding=encoding) as f:\n yield f\n\n def _normalize_slashes(self, location):\n # note that this is NOT os.path.join - the difference is os.path.join\n # discards empty strings, so if you use it on a re.split absolute\n # path you will get a relative path!\n return os.sep.join(re.split('[\\\\\\\\/]', location))\n\n def to_path(self, location):\n \"\"\"\n Convert a location string into a path on the local file system.\n\n For DiskIO this just fixes slashes and prepends the base location,\n doing nothing active with the file. 
But for other io managers that\n refer to remote storage, this method may actually fetch the file and\n put it at a temporary local path.\n\n Args:\n location (str): A location string for a complete dataset or\n a file within it.\n\n Returns:\n path (str): The path on disk to which this location maps.\n \"\"\"\n location = self._normalize_slashes(location)\n if self.base_location:\n return os.path.join(self.base_location, location)\n else:\n return location\n\n def to_location(self, path):\n \"\"\"\n Convert a local filesystem path into a location string.\n\n Args:\n path (str): a path on the local file system.\n\n Returns:\n location (str): the location string corresponding to this path.\n \"\"\"\n if self.base_location:\n return os.path.relpath(path, self.base_location)\n else:\n return path\n\n def __repr__(self):\n \"\"\"Show the base location in the repr.\"\"\"\n return '<DiskIO, base_location={}>'.format(repr(self.base_location))\n\n def join(self, *args):\n \"\"\"Context-dependent os.path.join for this io manager.\"\"\"\n return os.path.join(*list(map(self._normalize_slashes, args)))\n\n def isfile(self, location):\n \"\"\"Check whether this location matches a file.\"\"\"\n path = self.to_path(location)\n return os.path.isfile(path)\n\n def list(self, location, maxdepth=1, include_dirs=False):\n \"\"\"\n Return all files that match location.\n\n This is either files whose names match up to an arbitrary extension,\n or any files within an exactly matching directory name.\n\n Args:\n location (str): the location to match.\n May contain the usual path wildcards * and ?\n\n maxdepth (int, optional): maximum levels of directory nesting to\n recurse into looking for files. Default 1.\n\n include_dirs (bool, optional): whether to allow directories in\n the results or just files. 
Default False.\n\n Returns:\n A list of matching files and/or directories, as locations\n relative to our base_location.\n \"\"\"\n location = self._normalize_slashes(location)\n search_dir, pattern = os.path.split(location)\n path = self.to_path(search_dir)\n\n if not os.path.isdir(path):\n return []\n\n matches = [fn for fn in os.listdir(path) if fnmatch(fn, pattern + '*')]\n out = []\n\n for match in matches:\n matchpath = self.join(path, match)\n if os.path.isdir(matchpath) and fnmatch(match, pattern):\n if maxdepth > 0:\n # exact directory match - walk down to maxdepth\n for root, dirs, files in os.walk(matchpath, topdown=True):\n depth = root[len(path):].count(os.path.sep)\n if depth == maxdepth:\n dirs[:] = [] # don't recurse any further\n\n for fn in files + (dirs if include_dirs else []):\n out.append(self.to_location(self.join(root, fn)))\n\n elif include_dirs:\n out.append(self.join(search_dir, match))\n\n elif (os.path.isfile(matchpath) and\n (fnmatch(match, pattern) or\n fnmatch(os.path.splitext(match)[0], pattern))):\n # exact filename match, or match up to an extension\n # note that we need fnmatch(match, pattern) in addition to the\n # splitext test to cover the case of the base filename itself\n # containing a dot.\n out.append(self.join(search_dir, match))\n\n return out\n\n def remove(self, filename):\n \"\"\"Delete a file or folder and prune the directory tree.\"\"\"\n path = self.to_path(filename)\n if os.path.isdir(path):\n shutil.rmtree(path)\n else:\n os.remove(path)\n\n filepath = os.path.split(path)[0]\n try:\n os.removedirs(filepath)\n except OSError:\n # directory was not empty - good that we're not removing it!\n pass\n\n def remove_all(self, location):\n \"\"\"\n Delete all files/directories in the dataset at this location.\n\n Afterward prunes the directory tree.\n \"\"\"\n for fn in self.list(location):\n self.remove(fn)\n", "path": "qcodes/data/io.py"}]}
| 3,190 | 99 |
gh_patches_debug_12530
|
rasdani/github-patches
|
git_diff
|
opsdroid__opsdroid-1660
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add a config option to disable registering a route for /
In the web server, a route is registered for the base `/` path by default. This should be configurable in case a user wants to register their own handler for it.
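
As a sketch of what a user would do instead, registering their own root handler on the aiohttp app looks roughly like this (handler name and payload are illustrative):

```python
from aiohttp import web

async def my_index(request):
    # hypothetical user-supplied handler for the root path
    return web.json_response({"message": "my own root page"})

app = web.Application()
app.router.add_get("/", my_index)
```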
</issue>
<code>
[start of opsdroid/web.py]
1 """Submodule to handle web requests in opsdroid."""
2
3 import json
4 import logging
5 import ssl
6
7 from aiohttp import web
8
9 from opsdroid import __version__
10
11
12 _LOGGER = logging.getLogger(__name__)
13
14
15 class Web:
16 """Create class for opsdroid Web server."""
17
18 def __init__(self, opsdroid):
19 """Create web object."""
20 self.opsdroid = opsdroid
21 try:
22 self.config = self.opsdroid.config["web"]
23 except KeyError:
24 self.config = {}
25 self.base_url = self.config.get("base-url")
26 if not self.base_url:
27 self.base_url = "{proto}://{host}{port}".format(
28 proto="http" if self.get_ssl_context is None else "https",
29 host=self.get_host,
30 port=":{}".format(self.get_port)
31 if self.get_port not in (80, 443)
32 else "",
33 )
34 self.web_app = web.Application()
35 self.runner = web.AppRunner(self.web_app)
36 self.site = None
37 self.web_app.router.add_get("/", self.web_index_handler)
38 self.web_app.router.add_get("", self.web_index_handler)
39 self.web_app.router.add_get("/stats", self.web_stats_handler)
40 self.web_app.router.add_get("/stats/", self.web_stats_handler)
41
42 @property
43 def get_port(self):
44 """Return port from config or the default.
45
46 Args:
47 self: instance method
48
49 Returns:
50 int: returns value of port being used, config or default
51
52 """
53 try:
54 port = self.config["port"]
55 except KeyError:
56 if self.get_ssl_context is not None:
57 port = 8443
58 else:
59 port = 8080
60 return port
61
62 @property
63 def get_host(self):
64 """Return host from config or the default.
65
66 Args:
67 self: instance method
68
69 Returns:
70 string: returns address of host being used, config or default
71
72 """
73 try:
74 host = self.config["host"]
75 except KeyError:
76 host = "0.0.0.0"
77 return host
78
79 @property
80 def get_ssl_context(self):
81 """Return the ssl context or None.
82
83 Args:
84 self: instance method
85
86 Returns:
87 string (or NoneType): returns ssl context of None.
88
89 """
90 try:
91 ssl_config = self.config["ssl"]
92 sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
93 sslcontext.load_cert_chain(ssl_config["cert"], ssl_config["key"])
94 return sslcontext
95 except FileNotFoundError:
96 _LOGGER.error(_("Cannot find ssl cert or key."))
97 return None
98 except KeyError:
99 return None
100
101 async def start(self):
102 """Start web servers."""
103 _LOGGER.info(_(f"Started web server on {self.base_url}"))
104 await self.runner.setup()
105 self.site = web.TCPSite(
106 self.runner,
107 host=self.get_host,
108 port=self.get_port,
109 ssl_context=self.get_ssl_context,
110 )
111 await self.site.start()
112
113 async def stop(self):
114 """Stop the web server."""
115 await self.runner.cleanup()
116
117 @staticmethod
118 def build_response(status, result):
119 """Build a json response object to power the bot reponses.
120
121 Args:
122 result: serialize obj as a JSON formated stream
123
124 Returns:
125 json: returns json object with list of responses for the bot
126
127 """
128 return web.Response(text=json.dumps(result), status=status)
129
130 def register_skill(self, opsdroid, skill, webhook):
131 """Register a new skill in the web app router."""
132
133 async def wrapper(req, opsdroid=opsdroid, config=skill.config):
134 """Wrap up the aiohttp handler."""
135 webhook_token = self.config.get("webhook-token", None)
136 authorization_header = []
137 if req is not None:
138 authorization_header = req.headers.get("Authorization", "").split()
139
140 if webhook_token is not None:
141 if not (
142 len(authorization_header) == 2
143 and authorization_header[0] == "Bearer"
144 and authorization_header[1] == webhook_token
145 ):
146 _LOGGER.error(
147 _("Unauthorized to run skill %s via webhook"), webhook
148 )
149 return Web.build_response(403, {"called_skill": webhook})
150
151 _LOGGER.info(_("Running skill %s via webhook."), webhook)
152 opsdroid.stats["webhooks_called"] = opsdroid.stats["webhooks_called"] + 1
153 resp = await opsdroid.run_skill(skill, config, req)
154 if isinstance(resp, web.Response):
155 return resp
156 return Web.build_response(200, {"called_skill": webhook})
157
158 self.web_app.router.add_post(
159 "/skill/{}/{}".format(skill.config["name"], webhook), wrapper
160 )
161 self.web_app.router.add_post(
162 "/skill/{}/{}/".format(skill.config["name"], webhook), wrapper
163 )
164
165 def setup_webhooks(self, skills):
166 """Add the webhooks for the webhook skills to the router."""
167 for skill in skills:
168 for matcher in skill.matchers:
169 if "webhook" in matcher:
170 self.register_skill(self.opsdroid, skill, matcher["webhook"])
171
172 async def web_index_handler(self, request):
173 """Handle root web request to opsdroid API.
174
175 Args:
176 request: web request to the root (index)
177
178 Returns:
179 dict: returns successful status code and greeting for the root page
180
181 """
182 return self.build_response(200, {"message": "Welcome to the opsdroid API"})
183
184 async def web_stats_handler(self, request):
185 """Handle stats request.
186
187 Args:
188 request: web request to render opsdroid stats
189
190 Returns:
191 dict: returns successful status code and dictionary with
192 stats requested
193
194 """
195 stats = self.opsdroid.stats
196 try:
197 stats["average_response_time"] = (
198 stats["total_response_time"] / stats["total_responses"]
199 )
200 except ZeroDivisionError:
201 stats["average_response_time"] = 0
202
203 return self.build_response(
204 200,
205 {
206 "version": __version__,
207 "messages": {
208 "total_parsed": stats["messages_parsed"],
209 "webhooks_called": stats["webhooks_called"],
210 "total_response_time": stats["total_response_time"],
211 "total_responses": stats["total_responses"],
212 "average_response_time": stats["average_response_time"],
213 },
214 "modules": {
215 "skills": len(self.opsdroid.skills),
216 "connectors": len(self.opsdroid.connectors),
217 "databases": len(self.opsdroid.memory.databases),
218 },
219 },
220 )
221
[end of opsdroid/web.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opsdroid/web.py b/opsdroid/web.py
--- a/opsdroid/web.py
+++ b/opsdroid/web.py
@@ -34,8 +34,10 @@
self.web_app = web.Application()
self.runner = web.AppRunner(self.web_app)
self.site = None
- self.web_app.router.add_get("/", self.web_index_handler)
- self.web_app.router.add_get("", self.web_index_handler)
+ if not self.config.get("disable_web_index_handler_in_root", False):
+ self.web_app.router.add_get("/", self.web_index_handler)
+ self.web_app.router.add_get("", self.web_index_handler)
+
self.web_app.router.add_get("/stats", self.web_stats_handler)
self.web_app.router.add_get("/stats/", self.web_stats_handler)
|
{"golden_diff": "diff --git a/opsdroid/web.py b/opsdroid/web.py\n--- a/opsdroid/web.py\n+++ b/opsdroid/web.py\n@@ -34,8 +34,10 @@\n self.web_app = web.Application()\n self.runner = web.AppRunner(self.web_app)\n self.site = None\n- self.web_app.router.add_get(\"/\", self.web_index_handler)\n- self.web_app.router.add_get(\"\", self.web_index_handler)\n+ if not self.config.get(\"disable_web_index_handler_in_root\", False):\n+ self.web_app.router.add_get(\"/\", self.web_index_handler)\n+ self.web_app.router.add_get(\"\", self.web_index_handler)\n+\n self.web_app.router.add_get(\"/stats\", self.web_stats_handler)\n self.web_app.router.add_get(\"/stats/\", self.web_stats_handler)\n", "issue": "Add a config option to disable registering a route for /\nIn the web server by default a route is registered for the base / path. This should be configurable in case a user wants to register their own.\n", "before_files": [{"content": "\"\"\"Submodule to handle web requests in opsdroid.\"\"\"\n\nimport json\nimport logging\nimport ssl\n\nfrom aiohttp import web\n\nfrom opsdroid import __version__\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Web:\n \"\"\"Create class for opsdroid Web server.\"\"\"\n\n def __init__(self, opsdroid):\n \"\"\"Create web object.\"\"\"\n self.opsdroid = opsdroid\n try:\n self.config = self.opsdroid.config[\"web\"]\n except KeyError:\n self.config = {}\n self.base_url = self.config.get(\"base-url\")\n if not self.base_url:\n self.base_url = \"{proto}://{host}{port}\".format(\n proto=\"http\" if self.get_ssl_context is None else \"https\",\n host=self.get_host,\n port=\":{}\".format(self.get_port)\n if self.get_port not in (80, 443)\n else \"\",\n )\n self.web_app = web.Application()\n self.runner = web.AppRunner(self.web_app)\n self.site = None\n self.web_app.router.add_get(\"/\", self.web_index_handler)\n self.web_app.router.add_get(\"\", self.web_index_handler)\n self.web_app.router.add_get(\"/stats\", self.web_stats_handler)\n self.web_app.router.add_get(\"/stats/\", self.web_stats_handler)\n\n @property\n def get_port(self):\n \"\"\"Return port from config or the default.\n\n Args:\n self: instance method\n\n Returns:\n int: returns value of port being used, config or default\n\n \"\"\"\n try:\n port = self.config[\"port\"]\n except KeyError:\n if self.get_ssl_context is not None:\n port = 8443\n else:\n port = 8080\n return port\n\n @property\n def get_host(self):\n \"\"\"Return host from config or the default.\n\n Args:\n self: instance method\n\n Returns:\n string: returns address of host being used, config or default\n\n \"\"\"\n try:\n host = self.config[\"host\"]\n except KeyError:\n host = \"0.0.0.0\"\n return host\n\n @property\n def get_ssl_context(self):\n \"\"\"Return the ssl context or None.\n\n Args:\n self: instance method\n\n Returns:\n string (or NoneType): returns ssl context of None.\n\n \"\"\"\n try:\n ssl_config = self.config[\"ssl\"]\n sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1)\n sslcontext.load_cert_chain(ssl_config[\"cert\"], ssl_config[\"key\"])\n return sslcontext\n except FileNotFoundError:\n _LOGGER.error(_(\"Cannot find ssl cert or key.\"))\n return None\n except KeyError:\n return None\n\n async def start(self):\n \"\"\"Start web servers.\"\"\"\n _LOGGER.info(_(f\"Started web server on {self.base_url}\"))\n await self.runner.setup()\n self.site = web.TCPSite(\n self.runner,\n host=self.get_host,\n port=self.get_port,\n ssl_context=self.get_ssl_context,\n )\n await self.site.start()\n\n async def stop(self):\n \"\"\"Stop the web 
server.\"\"\"\n await self.runner.cleanup()\n\n @staticmethod\n def build_response(status, result):\n \"\"\"Build a json response object to power the bot reponses.\n\n Args:\n result: serialize obj as a JSON formated stream\n\n Returns:\n json: returns json object with list of responses for the bot\n\n \"\"\"\n return web.Response(text=json.dumps(result), status=status)\n\n def register_skill(self, opsdroid, skill, webhook):\n \"\"\"Register a new skill in the web app router.\"\"\"\n\n async def wrapper(req, opsdroid=opsdroid, config=skill.config):\n \"\"\"Wrap up the aiohttp handler.\"\"\"\n webhook_token = self.config.get(\"webhook-token\", None)\n authorization_header = []\n if req is not None:\n authorization_header = req.headers.get(\"Authorization\", \"\").split()\n\n if webhook_token is not None:\n if not (\n len(authorization_header) == 2\n and authorization_header[0] == \"Bearer\"\n and authorization_header[1] == webhook_token\n ):\n _LOGGER.error(\n _(\"Unauthorized to run skill %s via webhook\"), webhook\n )\n return Web.build_response(403, {\"called_skill\": webhook})\n\n _LOGGER.info(_(\"Running skill %s via webhook.\"), webhook)\n opsdroid.stats[\"webhooks_called\"] = opsdroid.stats[\"webhooks_called\"] + 1\n resp = await opsdroid.run_skill(skill, config, req)\n if isinstance(resp, web.Response):\n return resp\n return Web.build_response(200, {\"called_skill\": webhook})\n\n self.web_app.router.add_post(\n \"/skill/{}/{}\".format(skill.config[\"name\"], webhook), wrapper\n )\n self.web_app.router.add_post(\n \"/skill/{}/{}/\".format(skill.config[\"name\"], webhook), wrapper\n )\n\n def setup_webhooks(self, skills):\n \"\"\"Add the webhooks for the webhook skills to the router.\"\"\"\n for skill in skills:\n for matcher in skill.matchers:\n if \"webhook\" in matcher:\n self.register_skill(self.opsdroid, skill, matcher[\"webhook\"])\n\n async def web_index_handler(self, request):\n \"\"\"Handle root web request to opsdroid API.\n\n Args:\n request: web request to the root (index)\n\n Returns:\n dict: returns successful status code and greeting for the root page\n\n \"\"\"\n return self.build_response(200, {\"message\": \"Welcome to the opsdroid API\"})\n\n async def web_stats_handler(self, request):\n \"\"\"Handle stats request.\n\n Args:\n request: web request to render opsdroid stats\n\n Returns:\n dict: returns successful status code and dictionary with\n stats requested\n\n \"\"\"\n stats = self.opsdroid.stats\n try:\n stats[\"average_response_time\"] = (\n stats[\"total_response_time\"] / stats[\"total_responses\"]\n )\n except ZeroDivisionError:\n stats[\"average_response_time\"] = 0\n\n return self.build_response(\n 200,\n {\n \"version\": __version__,\n \"messages\": {\n \"total_parsed\": stats[\"messages_parsed\"],\n \"webhooks_called\": stats[\"webhooks_called\"],\n \"total_response_time\": stats[\"total_response_time\"],\n \"total_responses\": stats[\"total_responses\"],\n \"average_response_time\": stats[\"average_response_time\"],\n },\n \"modules\": {\n \"skills\": len(self.opsdroid.skills),\n \"connectors\": len(self.opsdroid.connectors),\n \"databases\": len(self.opsdroid.memory.databases),\n },\n },\n )\n", "path": "opsdroid/web.py"}]}
| 2,604 | 178 |
gh_patches_debug_11556
|
rasdani/github-patches
|
git_diff
|
zestedesavoir__zds-site-6174
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Duplicate colored logs in the dev environment
On my dev environment, the colored logs appear twice: once in their colored version and then again in their standard version.
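
A minimal sketch of the propagation behaviour that can produce such duplicates (assuming a second, plain handler also ends up attached to the root logger, for example via another settings module):

```python
import logging

root = logging.getLogger()
root.addHandler(logging.StreamHandler())        # plain, uncolored output

zds_logger = logging.getLogger("zds")
zds_logger.setLevel(logging.INFO)
zds_logger.addHandler(logging.StreamHandler())  # stands in for the colored console handler

# The record is emitted by the "zds" handler and then propagated to the
# root handler as well, so the same message is printed twice.
zds_logger.info("duplicate line")
```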

OS : Ubuntu Budgie 20.04
</issue>
<code>
[start of zds/settings/dev.py]
1 from colorlog import ColoredFormatter
2
3 from .abstract_base import *
4
5 DEBUG = True
6
7 # NOTE: Can be removed once Django 3 is used
8 ALLOWED_HOSTS = [".localhost", "127.0.0.1", "[::1]"]
9
10 INSTALLED_APPS += (
11 "debug_toolbar",
12 "django_extensions",
13 )
14
15 MIDDLEWARE = (
16 "debug_toolbar.middleware.DebugToolbarMiddleware",
17 "zds.middlewares.nocacheindevmiddleware.NoCacheInDevMiddleware",
18 ) + MIDDLEWARE
19
20 LOGGING = {
21 "version": 1,
22 "disable_existing_loggers": False,
23 "formatters": {
24 "verbose": {
25 "()": ColoredFormatter,
26 "format": "%(log_color)s %(levelname)s %(reset)s %(bold_black)s%(name)s%(reset)s %(message)s",
27 "log_colors": {
28 "DEBUG": "fg_white,bg_black",
29 "INFO": "fg_black,bg_bold_white",
30 "WARNING": "fg_black,bg_bold_yellow",
31 "ERROR": "fg_bold_white,bg_bold_red",
32 "CRITICAL": "fg_bold_white,bg_bold_red",
33 },
34 },
35 "django.server": {
36 "()": ColoredFormatter,
37 "format": "%(log_color)s%(message)s",
38 "log_colors": {
39 "INFO": "bold_black",
40 "WARNING": "bold_yellow",
41 "ERROR": "bold_red",
42 "CRITICAL": "bold_red",
43 },
44 },
45 },
46 "handlers": {
47 "console": {
48 "level": "DEBUG",
49 "class": "logging.StreamHandler",
50 "formatter": "verbose",
51 },
52 "django.server": {
53 "level": "DEBUG",
54 "class": "logging.StreamHandler",
55 "formatter": "django.server",
56 },
57 },
58 "loggers": {
59 "django": {
60 "level": "INFO",
61 "handlers": ["console"],
62 },
63 "django.server": {
64 "level": "INFO",
65 "handlers": ["django.server"],
66 "propagate": False,
67 },
68 "zds": {
69 "level": "INFO",
70 "handlers": ["console"],
71 },
72 },
73 }
74
75 ZDS_APP["site"]["url"] = "http://127.0.0.1:8000"
76 ZDS_APP["site"]["dns"] = "127.0.0.1:8000"
77
78 ZDS_APP["very_top_banner"] = {
79 "background_color": "#666",
80 "border_color": "#353535",
81 "color": "white",
82 "message": "Version locale",
83 "slug": "version-locale",
84 }
85
[end of zds/settings/dev.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/zds/settings/dev.py b/zds/settings/dev.py
--- a/zds/settings/dev.py
+++ b/zds/settings/dev.py
@@ -55,10 +55,12 @@
"formatter": "django.server",
},
},
+ "root": {
+ "handlers": ["console"],
+ },
"loggers": {
"django": {
"level": "INFO",
- "handlers": ["console"],
},
"django.server": {
"level": "INFO",
@@ -67,7 +69,6 @@
},
"zds": {
"level": "INFO",
- "handlers": ["console"],
},
},
}
|
{"golden_diff": "diff --git a/zds/settings/dev.py b/zds/settings/dev.py\n--- a/zds/settings/dev.py\n+++ b/zds/settings/dev.py\n@@ -55,10 +55,12 @@\n \"formatter\": \"django.server\",\n },\n },\n+ \"root\": {\n+ \"handlers\": [\"console\"],\n+ },\n \"loggers\": {\n \"django\": {\n \"level\": \"INFO\",\n- \"handlers\": [\"console\"],\n },\n \"django.server\": {\n \"level\": \"INFO\",\n@@ -67,7 +69,6 @@\n },\n \"zds\": {\n \"level\": \"INFO\",\n- \"handlers\": [\"console\"],\n },\n },\n }\n", "issue": "Doublon pour les logs en couleur dans l'env de dev\nSur mon environnement de dev, les logs en couleur apparaissent en double : dans leur version couleur et puis dans leur version standard.\r\n\r\n\r\n\r\nOS : Ubuntu Budgie 20.04\n", "before_files": [{"content": "from colorlog import ColoredFormatter\n\nfrom .abstract_base import *\n\nDEBUG = True\n\n# NOTE: Can be removed once Django 3 is used\nALLOWED_HOSTS = [\".localhost\", \"127.0.0.1\", \"[::1]\"]\n\nINSTALLED_APPS += (\n \"debug_toolbar\",\n \"django_extensions\",\n)\n\nMIDDLEWARE = (\n \"debug_toolbar.middleware.DebugToolbarMiddleware\",\n \"zds.middlewares.nocacheindevmiddleware.NoCacheInDevMiddleware\",\n) + MIDDLEWARE\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"verbose\": {\n \"()\": ColoredFormatter,\n \"format\": \"%(log_color)s %(levelname)s %(reset)s %(bold_black)s%(name)s%(reset)s %(message)s\",\n \"log_colors\": {\n \"DEBUG\": \"fg_white,bg_black\",\n \"INFO\": \"fg_black,bg_bold_white\",\n \"WARNING\": \"fg_black,bg_bold_yellow\",\n \"ERROR\": \"fg_bold_white,bg_bold_red\",\n \"CRITICAL\": \"fg_bold_white,bg_bold_red\",\n },\n },\n \"django.server\": {\n \"()\": ColoredFormatter,\n \"format\": \"%(log_color)s%(message)s\",\n \"log_colors\": {\n \"INFO\": \"bold_black\",\n \"WARNING\": \"bold_yellow\",\n \"ERROR\": \"bold_red\",\n \"CRITICAL\": \"bold_red\",\n },\n },\n },\n \"handlers\": {\n \"console\": {\n \"level\": \"DEBUG\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n },\n \"django.server\": {\n \"level\": \"DEBUG\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"django.server\",\n },\n },\n \"loggers\": {\n \"django\": {\n \"level\": \"INFO\",\n \"handlers\": [\"console\"],\n },\n \"django.server\": {\n \"level\": \"INFO\",\n \"handlers\": [\"django.server\"],\n \"propagate\": False,\n },\n \"zds\": {\n \"level\": \"INFO\",\n \"handlers\": [\"console\"],\n },\n },\n}\n\nZDS_APP[\"site\"][\"url\"] = \"http://127.0.0.1:8000\"\nZDS_APP[\"site\"][\"dns\"] = \"127.0.0.1:8000\"\n\nZDS_APP[\"very_top_banner\"] = {\n \"background_color\": \"#666\",\n \"border_color\": \"#353535\",\n \"color\": \"white\",\n \"message\": \"Version locale\",\n \"slug\": \"version-locale\",\n}\n", "path": "zds/settings/dev.py"}]}
| 1,399 | 156 |
gh_patches_debug_242
|
rasdani/github-patches
|
git_diff
|
sanic-org__sanic-1559
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
2 failed tests when tox is not used (missing fixture "benchmark")
`pytest-benchmark` is not present in `tests_require`, so there are 2 failed tests in `tests/benchmark/test_route_resolution_benchmark.py` when tox is not used.
This requirement is present in `tox.ini` so tox and Travis CI are working fine.
(I don't know what's a better fix — disable the benchmark tests or add `pytest-benchmark` to `tests_require`, so I didn't create a PR)
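
For context, the two failing tests rely on the `benchmark` fixture that the pytest-benchmark plugin provides, so without the plugin they error out at setup; a minimal sketch of such a test (names are illustrative, not the actual test code):

```python
# Requires the pytest-benchmark plugin, which injects the `benchmark` fixture.
def test_route_resolution(benchmark):
    def resolve():
        return 1 + 1  # stands in for the router lookup being measured

    benchmark(resolve)
```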
</issue>
<code>
[start of setup.py]
1 """
2 Sanic
3 """
4 import codecs
5 import os
6 import re
7 import sys
8 from distutils.util import strtobool
9
10 from setuptools import setup
11 from setuptools.command.test import test as TestCommand
12
13
14 class PyTest(TestCommand):
15 """
16 Provide a Test runner to be used from setup.py to run unit tests
17 """
18
19 user_options = [("pytest-args=", "a", "Arguments to pass to pytest")]
20
21 def initialize_options(self):
22 TestCommand.initialize_options(self)
23 self.pytest_args = ""
24
25 def run_tests(self):
26 import shlex
27 import pytest
28
29 errno = pytest.main(shlex.split(self.pytest_args))
30 sys.exit(errno)
31
32
33 def open_local(paths, mode="r", encoding="utf8"):
34 path = os.path.join(os.path.abspath(os.path.dirname(__file__)), *paths)
35
36 return codecs.open(path, mode, encoding)
37
38
39 with open_local(["sanic", "__init__.py"], encoding="latin1") as fp:
40 try:
41 version = re.findall(
42 r"^__version__ = \"([^']+)\"\r?$", fp.read(), re.M
43 )[0]
44 except IndexError:
45 raise RuntimeError("Unable to determine version.")
46
47 with open_local(["README.rst"]) as rm:
48 long_description = rm.read()
49
50 setup_kwargs = {
51 "name": "sanic",
52 "version": version,
53 "url": "http://github.com/channelcat/sanic/",
54 "license": "MIT",
55 "author": "Channel Cat",
56 "author_email": "[email protected]",
57 "description": (
58 "A microframework based on uvloop, httptools, and learnings of flask"
59 ),
60 "long_description": long_description,
61 "packages": ["sanic"],
62 "platforms": "any",
63 "classifiers": [
64 "Development Status :: 4 - Beta",
65 "Environment :: Web Environment",
66 "License :: OSI Approved :: MIT License",
67 "Programming Language :: Python :: 3.5",
68 "Programming Language :: Python :: 3.6",
69 "Programming Language :: Python :: 3.7",
70 ],
71 }
72
73 env_dependency = (
74 '; sys_platform != "win32" ' 'and implementation_name == "cpython"'
75 )
76 ujson = "ujson>=1.35" + env_dependency
77 uvloop = "uvloop>=0.5.3" + env_dependency
78
79 requirements = [
80 "httptools>=0.0.10",
81 uvloop,
82 ujson,
83 "aiofiles>=0.3.0",
84 "websockets>=6.0,<7.0",
85 "multidict>=4.0,<5.0",
86 ]
87
88 tests_require = [
89 "pytest==4.1.0",
90 "multidict>=4.0,<5.0",
91 "gunicorn",
92 "pytest-cov",
93 "aiohttp>=2.3.0,<=3.2.1",
94 "beautifulsoup4",
95 uvloop,
96 ujson,
97 "pytest-sanic",
98 "pytest-sugar",
99 ]
100
101 if strtobool(os.environ.get("SANIC_NO_UJSON", "no")):
102 print("Installing without uJSON")
103 requirements.remove(ujson)
104 tests_require.remove(ujson)
105
106 # 'nt' means windows OS
107 if strtobool(os.environ.get("SANIC_NO_UVLOOP", "no")):
108 print("Installing without uvLoop")
109 requirements.remove(uvloop)
110 tests_require.remove(uvloop)
111
112 extras_require = {
113 "test": tests_require,
114 "dev": tests_require + ["aiofiles", "tox", "black", "flake8"],
115 "docs": [
116 "sphinx",
117 "sphinx_rtd_theme",
118 "recommonmark",
119 "sphinxcontrib-asyncio",
120 "docutils",
121 "pygments"
122 ],
123 }
124
125 setup_kwargs["install_requires"] = requirements
126 setup_kwargs["tests_require"] = tests_require
127 setup_kwargs["extras_require"] = extras_require
128 setup_kwargs["cmdclass"] = {"test": PyTest}
129 setup(**setup_kwargs)
130
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -96,6 +96,7 @@
ujson,
"pytest-sanic",
"pytest-sugar",
+ "pytest-benchmark",
]
if strtobool(os.environ.get("SANIC_NO_UJSON", "no")):
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -96,6 +96,7 @@\n ujson,\n \"pytest-sanic\",\n \"pytest-sugar\",\n+ \"pytest-benchmark\",\n ]\n \n if strtobool(os.environ.get(\"SANIC_NO_UJSON\", \"no\")):\n", "issue": "2 failed tests when tox is not used (missing fixture \"benchmark\")\n`pytest-benchmark` is not present in `tests_require`, so there are 2 failed tests in `tests/benchmark/test_route_resolution_benchmark.py` when tox is not used.\r\n\r\nThis requirement is present in `tox.ini` so tox and Travis CI are working fine.\r\n\r\n(I don't know what's a better fix \u2014 disable the benchmark tests or add `pytest-benchmark` to `tests_require`, so I didn't create a PR)\n", "before_files": [{"content": "\"\"\"\nSanic\n\"\"\"\nimport codecs\nimport os\nimport re\nimport sys\nfrom distutils.util import strtobool\n\nfrom setuptools import setup\nfrom setuptools.command.test import test as TestCommand\n\n\nclass PyTest(TestCommand):\n \"\"\"\n Provide a Test runner to be used from setup.py to run unit tests\n \"\"\"\n\n user_options = [(\"pytest-args=\", \"a\", \"Arguments to pass to pytest\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = \"\"\n\n def run_tests(self):\n import shlex\n import pytest\n\n errno = pytest.main(shlex.split(self.pytest_args))\n sys.exit(errno)\n\n\ndef open_local(paths, mode=\"r\", encoding=\"utf8\"):\n path = os.path.join(os.path.abspath(os.path.dirname(__file__)), *paths)\n\n return codecs.open(path, mode, encoding)\n\n\nwith open_local([\"sanic\", \"__init__.py\"], encoding=\"latin1\") as fp:\n try:\n version = re.findall(\n r\"^__version__ = \\\"([^']+)\\\"\\r?$\", fp.read(), re.M\n )[0]\n except IndexError:\n raise RuntimeError(\"Unable to determine version.\")\n\nwith open_local([\"README.rst\"]) as rm:\n long_description = rm.read()\n\nsetup_kwargs = {\n \"name\": \"sanic\",\n \"version\": version,\n \"url\": \"http://github.com/channelcat/sanic/\",\n \"license\": \"MIT\",\n \"author\": \"Channel Cat\",\n \"author_email\": \"[email protected]\",\n \"description\": (\n \"A microframework based on uvloop, httptools, and learnings of flask\"\n ),\n \"long_description\": long_description,\n \"packages\": [\"sanic\"],\n \"platforms\": \"any\",\n \"classifiers\": [\n \"Development Status :: 4 - Beta\",\n \"Environment :: Web Environment\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n}\n\nenv_dependency = (\n '; sys_platform != \"win32\" ' 'and implementation_name == \"cpython\"'\n)\nujson = \"ujson>=1.35\" + env_dependency\nuvloop = \"uvloop>=0.5.3\" + env_dependency\n\nrequirements = [\n \"httptools>=0.0.10\",\n uvloop,\n ujson,\n \"aiofiles>=0.3.0\",\n \"websockets>=6.0,<7.0\",\n \"multidict>=4.0,<5.0\",\n]\n\ntests_require = [\n \"pytest==4.1.0\",\n \"multidict>=4.0,<5.0\",\n \"gunicorn\",\n \"pytest-cov\",\n \"aiohttp>=2.3.0,<=3.2.1\",\n \"beautifulsoup4\",\n uvloop,\n ujson,\n \"pytest-sanic\",\n \"pytest-sugar\",\n]\n\nif strtobool(os.environ.get(\"SANIC_NO_UJSON\", \"no\")):\n print(\"Installing without uJSON\")\n requirements.remove(ujson)\n tests_require.remove(ujson)\n\n# 'nt' means windows OS\nif strtobool(os.environ.get(\"SANIC_NO_UVLOOP\", \"no\")):\n print(\"Installing without uvLoop\")\n requirements.remove(uvloop)\n tests_require.remove(uvloop)\n\nextras_require = {\n \"test\": tests_require,\n \"dev\": tests_require + 
[\"aiofiles\", \"tox\", \"black\", \"flake8\"],\n \"docs\": [\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"recommonmark\",\n \"sphinxcontrib-asyncio\",\n \"docutils\",\n \"pygments\"\n ],\n}\n\nsetup_kwargs[\"install_requires\"] = requirements\nsetup_kwargs[\"tests_require\"] = tests_require\nsetup_kwargs[\"extras_require\"] = extras_require\nsetup_kwargs[\"cmdclass\"] = {\"test\": PyTest}\nsetup(**setup_kwargs)\n", "path": "setup.py"}]}
| 1,808 | 75 |
gh_patches_debug_38426
|
rasdani/github-patches
|
git_diff
|
hylang__hy-2453
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove the special mangling rule for question marks
This was one mangling detail that I disagreed with @gilch about years ago, but I belatedly think he was right. The rule that `foo?` mangles to `is_foo` (instead of `hyx_fooXquestion_markX`) should probably go. What's convinced me is wanting to use question marks on some names and then deciding not to because of mangling:
- In [Infinitesimal Quest 2 + ε](https://github.com/hylang/simalq), items have a Boolean attribute `destroy-on-pickup` that indicates whether the item is destroyed when it's picked up. I could call it `destroy-on-pickup?`, but that would mangle to the nonsensical `is_destroy_on_pickup`.
- Likewise, `attack?`, meaning "should this monster attack?", would be misleadingly mangled to `is_attack`.
Another concern is that Hy's core macro `is-not` can be written `not?`, which looks like a unary predicate for some sort of object called a "not", instead of a binary function that indicates whether `a` is not `b`.
One motivation for the question-mark rule was that it could provide a Lispy alias for a Python name: if a library has a function `is_foo`, you can call it as `foo`. But this purpose was always undermined by [Python core's inconsistency between `isfoo` and `is_foo`](https://github.com/python/cpython/issues/73274).
Finally, there's something to be said for making mangling a little simpler, since it's pretty complicated.
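
A rough sketch of the behaviour change being proposed, assuming the trailing-`?` step is simply dropped so `?` falls through to the ordinary illegal-character handling (the outputs below are what the remaining rules would then produce, not verified against any build):

```python
import hy  # assumes a Hy build with the question-mark rule removed

# Today: (hy.mangle 'foo-bar?) => "is_foo_bar"
# Without the special rule, "?" is mangled like any other illegal character:
print(hy.mangle("foo-bar?"))            # "hyx_foo_barXquestion_markX"
print(hy.mangle("destroy-on-pickup?"))  # "hyx_destroy_on_pickupXquestion_markX"

# And unmangling would stop inventing a "?" for names that merely start with "is_":
print(hy.unmangle("is_foo_bar"))        # "is-foo-bar" rather than "foo-bar?"
```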
</issue>
<code>
[start of hy/reader/mangling.py]
1 import re
2 import unicodedata
3
4 MANGLE_DELIM = "X"
5
6 normalizes_to_underscore = "_︳︴﹍﹎﹏_"
7
8
9 def mangle(s):
10 """Stringify the argument (with :class:`str`, not :func:`repr` or
11 :hy:func:`hy.repr`) and convert it to a valid Python identifier according
12 to :ref:`Hy's mangling rules <mangling>`. ::
13
14 (hy.mangle 'foo-bar?) ; => "is_foo_bar"
15 (hy.mangle "🦑") ; => "hyx_squid"
16
17 If the stringified argument is already both legal as a Python identifier
18 and normalized according to Unicode normalization form KC (NFKC), it will
19 be returned unchanged. Thus, ``hy.mangle`` is idempotent. ::
20
21 (setv x '♦-->♠)
22 (= (hy.mangle (hy.mangle x)) (hy.mangle x)) ; => True
23
24 Generally, the stringifed input is expected to be parsable as a symbol. As
25 a convenience, it can also have the syntax of a :ref:`dotted identifier
26 <dotted-identifiers>`, and ``hy.mangle`` will mangle the dot-delimited
27 parts separately. ::
28
29 (hy.mangle "a.b?.c!.d") ; => "a.is_b.hyx_cXexclamation_markX.d"
30 """
31
32 assert s
33 s = str(s)
34
35 if "." in s and s.strip("."):
36 return ".".join(mangle(x) if x else "" for x in s.split("."))
37
38 # Step 1: Remove and save leading underscores
39 s2 = s.lstrip(normalizes_to_underscore)
40 leading_underscores = "_" * (len(s) - len(s2))
41 s = s2
42
43 # Step 2: Convert hyphens without introducing a new leading underscore
44 s = s[0] + s[1:].replace("-", "_") if s else s
45
46 # Step 3: Convert trailing `?` to leading `is_`
47 if s.endswith("?"):
48 s = "is_" + s[:-1]
49
50 # Step 4: Convert invalid characters or reserved words
51 if not (leading_underscores + s).isidentifier():
52 # Replace illegal characters with their Unicode character
53 # names, or hexadecimal if they don't have one.
54 s = "hyx_" + "".join(
55 c if c != MANGLE_DELIM and ("S" + c).isidentifier()
56 # We prepend the "S" because some characters aren't
57 # allowed at the start of an identifier.
58 else "{0}{1}{0}".format(
59 MANGLE_DELIM,
60 unicodedata.name(c, "").lower().replace("-", "H").replace(" ", "_")
61 or "U{:x}".format(ord(c)),
62 )
63 for c in s
64 )
65
66 # Step 5: Add back leading underscores
67 s = leading_underscores + s
68
69 # Normalize Unicode per PEP 3131.
70 s = unicodedata.normalize("NFKC", s)
71
72 assert s.isidentifier()
73 return s
74
75
76 def unmangle(s):
77 """Stringify the argument and try to convert it to a pretty unmangled
78 form. See :ref:`Hy's mangling rules <mangling>`.
79
80 Unmangling may not round-trip, because different Hy symbol names can mangle
81 to the same Python identifier. In particular, Python itself already
82 considers distinct strings that have the same normalized form (according to
83 NFKC), such as ``hello`` and ``𝔥𝔢𝔩𝔩𝔬``, to be the same identifier.
84
85 Examples:
86 ::
87
88 => (hy.unmangle 'foo_bar)
89 "foo-bar"
90
91 => (hy.unmangle 'is_foo_bar)
92 "foo-bar?"
93
94 => (hy.unmangle 'hyx_XasteriskX)
95 "*"
96
97 => (hy.unmangle '_hyx_is_fooXsolidusXa)
98 "_foo/a?"
99
100 => (hy.unmangle 'hyx_XhyphenHminusX_XgreaterHthan_signX)
101 "-->"
102
103 => (hy.unmangle 'hyx_XlessHthan_signX__)
104 "<--"
105
106 => (hy.unmangle '__dunder_name__)
107 "__dunder-name__"
108
109 """
110
111 s = str(s)
112
113 prefix = ""
114 suffix = ""
115 m = re.fullmatch(r"(_+)(.*?)(_*)", s, re.DOTALL)
116 if m:
117 prefix, s, suffix = m.groups()
118
119 if s.startswith("hyx_"):
120 s = re.sub(
121 "{0}(U)?([_a-z0-9H]+?){0}".format(MANGLE_DELIM),
122 lambda mo: chr(int(mo.group(2), base=16))
123 if mo.group(1)
124 else unicodedata.lookup(
125 mo.group(2).replace("_", " ").replace("H", "-").upper()
126 ),
127 s[len("hyx_") :],
128 )
129 if s.startswith("is_"):
130 s = s[len("is_") :] + "?"
131 s = s.replace("_", "-")
132
133 return prefix + s + suffix
134
[end of hy/reader/mangling.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/hy/reader/mangling.py b/hy/reader/mangling.py
--- a/hy/reader/mangling.py
+++ b/hy/reader/mangling.py
@@ -11,7 +11,7 @@
:hy:func:`hy.repr`) and convert it to a valid Python identifier according
to :ref:`Hy's mangling rules <mangling>`. ::
- (hy.mangle 'foo-bar?) ; => "is_foo_bar"
+ (hy.mangle 'foo-bar) ; => "foo_bar"
(hy.mangle "🦑") ; => "hyx_squid"
If the stringified argument is already both legal as a Python identifier
@@ -26,7 +26,7 @@
<dotted-identifiers>`, and ``hy.mangle`` will mangle the dot-delimited
parts separately. ::
- (hy.mangle "a.b?.c!.d") ; => "a.is_b.hyx_cXexclamation_markX.d"
+ (hy.mangle "a.c!.d") ; => "a.hyx_cXexclamation_markX.d"
"""
assert s
@@ -35,19 +35,15 @@
if "." in s and s.strip("."):
return ".".join(mangle(x) if x else "" for x in s.split("."))
- # Step 1: Remove and save leading underscores
+ # Remove and save leading underscores
s2 = s.lstrip(normalizes_to_underscore)
leading_underscores = "_" * (len(s) - len(s2))
s = s2
- # Step 2: Convert hyphens without introducing a new leading underscore
+ # Convert hyphens without introducing a new leading underscore
s = s[0] + s[1:].replace("-", "_") if s else s
- # Step 3: Convert trailing `?` to leading `is_`
- if s.endswith("?"):
- s = "is_" + s[:-1]
-
- # Step 4: Convert invalid characters or reserved words
+ # Convert invalid characters or reserved words
if not (leading_underscores + s).isidentifier():
# Replace illegal characters with their Unicode character
# names, or hexadecimal if they don't have one.
@@ -88,15 +84,9 @@
=> (hy.unmangle 'foo_bar)
"foo-bar"
- => (hy.unmangle 'is_foo_bar)
- "foo-bar?"
-
=> (hy.unmangle 'hyx_XasteriskX)
"*"
- => (hy.unmangle '_hyx_is_fooXsolidusXa)
- "_foo/a?"
-
=> (hy.unmangle 'hyx_XhyphenHminusX_XgreaterHthan_signX)
"-->"
@@ -126,8 +116,6 @@
),
s[len("hyx_") :],
)
- if s.startswith("is_"):
- s = s[len("is_") :] + "?"
s = s.replace("_", "-")
return prefix + s + suffix
|
{"golden_diff": "diff --git a/hy/reader/mangling.py b/hy/reader/mangling.py\n--- a/hy/reader/mangling.py\n+++ b/hy/reader/mangling.py\n@@ -11,7 +11,7 @@\n :hy:func:`hy.repr`) and convert it to a valid Python identifier according\n to :ref:`Hy's mangling rules <mangling>`. ::\n \n- (hy.mangle 'foo-bar?) ; => \"is_foo_bar\"\n+ (hy.mangle 'foo-bar) ; => \"foo_bar\"\n (hy.mangle \"\ud83e\udd91\") ; => \"hyx_squid\"\n \n If the stringified argument is already both legal as a Python identifier\n@@ -26,7 +26,7 @@\n <dotted-identifiers>`, and ``hy.mangle`` will mangle the dot-delimited\n parts separately. ::\n \n- (hy.mangle \"a.b?.c!.d\") ; => \"a.is_b.hyx_cXexclamation_markX.d\"\n+ (hy.mangle \"a.c!.d\") ; => \"a.hyx_cXexclamation_markX.d\"\n \"\"\"\n \n assert s\n@@ -35,19 +35,15 @@\n if \".\" in s and s.strip(\".\"):\n return \".\".join(mangle(x) if x else \"\" for x in s.split(\".\"))\n \n- # Step 1: Remove and save leading underscores\n+ # Remove and save leading underscores\n s2 = s.lstrip(normalizes_to_underscore)\n leading_underscores = \"_\" * (len(s) - len(s2))\n s = s2\n \n- # Step 2: Convert hyphens without introducing a new leading underscore\n+ # Convert hyphens without introducing a new leading underscore\n s = s[0] + s[1:].replace(\"-\", \"_\") if s else s\n \n- # Step 3: Convert trailing `?` to leading `is_`\n- if s.endswith(\"?\"):\n- s = \"is_\" + s[:-1]\n-\n- # Step 4: Convert invalid characters or reserved words\n+ # Convert invalid characters or reserved words\n if not (leading_underscores + s).isidentifier():\n # Replace illegal characters with their Unicode character\n # names, or hexadecimal if they don't have one.\n@@ -88,15 +84,9 @@\n => (hy.unmangle 'foo_bar)\n \"foo-bar\"\n \n- => (hy.unmangle 'is_foo_bar)\n- \"foo-bar?\"\n-\n => (hy.unmangle 'hyx_XasteriskX)\n \"*\"\n \n- => (hy.unmangle '_hyx_is_fooXsolidusXa)\n- \"_foo/a?\"\n-\n => (hy.unmangle 'hyx_XhyphenHminusX_XgreaterHthan_signX)\n \"-->\"\n \n@@ -126,8 +116,6 @@\n ),\n s[len(\"hyx_\") :],\n )\n- if s.startswith(\"is_\"):\n- s = s[len(\"is_\") :] + \"?\"\n s = s.replace(\"_\", \"-\")\n \n return prefix + s + suffix\n", "issue": "Remove the special mangling rule for question marks\nThis was one mangling detail that I disagreed with @gilch about years ago, but I belatedly think he was right. The rule that `foo?` mangles to `is_foo` (instead of `hyx_fooXquestion_markX`) should probably go. What's convinced me is wanting to use question marks on some names and then deciding not to because of mangling:\r\n\r\n- In [Infinitesimal Quest 2 + \u03b5](https://github.com/hylang/simalq), items have a Boolean attribute `destroy-on-pickup` that indicate whether the item is destroyed when it's picked up. I could call it `destroy-on-pickup?`, but that would mangle to the nonsensical `is_destroy_on_pickup`.\r\n- Likewise, `attack?`, meaning \"should this monster attack?\", would be misleadingly mangled to `is_attack`.\r\n\r\nAnother concern is that Hy's core macro `is-not` can be written `not?`, which looks like a unary predicate for some sort of object called a \"not\", instead of a binary function that indicates whether `a` is not `b`.\r\n\r\nOne motivation for the question-mark rule was that it could provide a Lispy alias for a Python name: if a library has a function `is_foo`, you can call it as `foo`. 
But this purpose was always undermined by [Python core's inconsistency between `isfoo` and `is_foo`](https://github.com/python/cpython/issues/73274).\r\n\r\nFinally, there's something to be said for making mangling a little simpler, since it's pretty complicated.\n", "before_files": [{"content": "import re\nimport unicodedata\n\nMANGLE_DELIM = \"X\"\n\nnormalizes_to_underscore = \"_\ufe33\ufe34\ufe4d\ufe4e\ufe4f\uff3f\"\n\n\ndef mangle(s):\n \"\"\"Stringify the argument (with :class:`str`, not :func:`repr` or\n :hy:func:`hy.repr`) and convert it to a valid Python identifier according\n to :ref:`Hy's mangling rules <mangling>`. ::\n\n (hy.mangle 'foo-bar?) ; => \"is_foo_bar\"\n (hy.mangle \"\ud83e\udd91\") ; => \"hyx_squid\"\n\n If the stringified argument is already both legal as a Python identifier\n and normalized according to Unicode normalization form KC (NFKC), it will\n be returned unchanged. Thus, ``hy.mangle`` is idempotent. ::\n\n (setv x '\u2666-->\u2660)\n (= (hy.mangle (hy.mangle x)) (hy.mangle x)) ; => True\n\n Generally, the stringifed input is expected to be parsable as a symbol. As\n a convenience, it can also have the syntax of a :ref:`dotted identifier\n <dotted-identifiers>`, and ``hy.mangle`` will mangle the dot-delimited\n parts separately. ::\n\n (hy.mangle \"a.b?.c!.d\") ; => \"a.is_b.hyx_cXexclamation_markX.d\"\n \"\"\"\n\n assert s\n s = str(s)\n\n if \".\" in s and s.strip(\".\"):\n return \".\".join(mangle(x) if x else \"\" for x in s.split(\".\"))\n\n # Step 1: Remove and save leading underscores\n s2 = s.lstrip(normalizes_to_underscore)\n leading_underscores = \"_\" * (len(s) - len(s2))\n s = s2\n\n # Step 2: Convert hyphens without introducing a new leading underscore\n s = s[0] + s[1:].replace(\"-\", \"_\") if s else s\n\n # Step 3: Convert trailing `?` to leading `is_`\n if s.endswith(\"?\"):\n s = \"is_\" + s[:-1]\n\n # Step 4: Convert invalid characters or reserved words\n if not (leading_underscores + s).isidentifier():\n # Replace illegal characters with their Unicode character\n # names, or hexadecimal if they don't have one.\n s = \"hyx_\" + \"\".join(\n c if c != MANGLE_DELIM and (\"S\" + c).isidentifier()\n # We prepend the \"S\" because some characters aren't\n # allowed at the start of an identifier.\n else \"{0}{1}{0}\".format(\n MANGLE_DELIM,\n unicodedata.name(c, \"\").lower().replace(\"-\", \"H\").replace(\" \", \"_\")\n or \"U{:x}\".format(ord(c)),\n )\n for c in s\n )\n\n # Step 5: Add back leading underscores\n s = leading_underscores + s\n\n # Normalize Unicode per PEP 3131.\n s = unicodedata.normalize(\"NFKC\", s)\n\n assert s.isidentifier()\n return s\n\n\ndef unmangle(s):\n \"\"\"Stringify the argument and try to convert it to a pretty unmangled\n form. See :ref:`Hy's mangling rules <mangling>`.\n\n Unmangling may not round-trip, because different Hy symbol names can mangle\n to the same Python identifier. 
In particular, Python itself already\n considers distinct strings that have the same normalized form (according to\n NFKC), such as ``hello`` and ``\ud835\udd25\ud835\udd22\ud835\udd29\ud835\udd29\ud835\udd2c``, to be the same identifier.\n\n Examples:\n ::\n\n => (hy.unmangle 'foo_bar)\n \"foo-bar\"\n\n => (hy.unmangle 'is_foo_bar)\n \"foo-bar?\"\n\n => (hy.unmangle 'hyx_XasteriskX)\n \"*\"\n\n => (hy.unmangle '_hyx_is_fooXsolidusXa)\n \"_foo/a?\"\n\n => (hy.unmangle 'hyx_XhyphenHminusX_XgreaterHthan_signX)\n \"-->\"\n\n => (hy.unmangle 'hyx_XlessHthan_signX__)\n \"<--\"\n\n => (hy.unmangle '__dunder_name__)\n \"__dunder-name__\"\n\n \"\"\"\n\n s = str(s)\n\n prefix = \"\"\n suffix = \"\"\n m = re.fullmatch(r\"(_+)(.*?)(_*)\", s, re.DOTALL)\n if m:\n prefix, s, suffix = m.groups()\n\n if s.startswith(\"hyx_\"):\n s = re.sub(\n \"{0}(U)?([_a-z0-9H]+?){0}\".format(MANGLE_DELIM),\n lambda mo: chr(int(mo.group(2), base=16))\n if mo.group(1)\n else unicodedata.lookup(\n mo.group(2).replace(\"_\", \" \").replace(\"H\", \"-\").upper()\n ),\n s[len(\"hyx_\") :],\n )\n if s.startswith(\"is_\"):\n s = s[len(\"is_\") :] + \"?\"\n s = s.replace(\"_\", \"-\")\n\n return prefix + s + suffix\n", "path": "hy/reader/mangling.py"}]}
| 2,361 | 713 |
gh_patches_debug_16692
|
rasdani/github-patches
|
git_diff
|
azavea__raster-vision-678
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update setup.rst to reference raster-vision-cloudformation
The setup instructions currently reference raster-vision-aws.
</issue>
<code>
[start of docs/conf.py]
1 from pallets_sphinx_themes import ProjectLink, get_version
2
3 # -*- coding: utf-8 -*-
4 #
5 # Configuration file for the Sphinx documentation builder.
6 #
7 # This file does only contain a selection of the most common options. For a
8 # full list see the documentation:
9 # http://www.sphinx-doc.org/en/stable/config
10
11 # -- Path setup --------------------------------------------------------------
12
13 # If extensions (or modules to document with autodoc) are in another directory,
14 # add these directories to sys.path here. If the directory is relative to the
15 # documentation root, use os.path.abspath to make it absolute, like shown here.
16 #
17 # import os
18 # import sys
19 # sys.path.insert(0, os.path.abspath('.'))
20
21
22 # -- Project information -----------------------------------------------------
23
24 project = 'Raster Vision'
25 copyright = '2018, Azavea'
26 author = 'Azavea'
27
28 # The short X.Y version
29 version = '0.8'
30 # The full version, including alpha/beta/rc tags
31 release = '0.8.0'
32
33
34 # -- General configuration ---------------------------------------------------
35
36 # If your documentation needs a minimal Sphinx version, state it here.
37 #
38 # needs_sphinx = '1.0'
39
40 # Add any Sphinx extension module names here, as strings. They can be
41 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
42 # ones.
43 extensions = [
44 'sphinx.ext.autodoc',
45 'sphinx.ext.intersphinx',
46 'pallets_sphinx_themes',
47 'sphinxcontrib.programoutput'
48 ]
49
50 intersphinx_mapping = {'python': ('https://docs.python.org/3/', None)}
51
52 # Add any paths that contain templates here, relative to this directory.
53 templates_path = ['_templates']
54
55 # The suffix(es) of source filenames.
56 # You can specify multiple suffix as a list of string:
57 #
58 # source_suffix = ['.rst', '.md']
59 source_suffix = '.rst'
60
61 # The master toctree document.
62 master_doc = 'index'
63
64 # The language for content autogenerated by Sphinx. Refer to documentation
65 # for a list of supported languages.
66 #
67 # This is also used if you do content translation via gettext catalogs.
68 # Usually you set "language" from the command line for these cases.
69 language = None
70
71 # List of patterns, relative to source directory, that match files and
72 # directories to ignore when looking for source files.
73 # This pattern also affects html_static_path and html_extra_path .
74 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'README.md']
75
76 # The name of the Pygments (syntax highlighting) style to use.
77 # pygments_style = 'sphinx'
78
79 # HTML -----------------------------------------------------------------
80
81 html_theme = 'click'
82 html_theme_options = {'index_sidebar_logo': False}
83 html_context = {
84 'project_links': [
85 ProjectLink('Quickstart', 'quickstart.html'),
86 ProjectLink('Documentation TOC', 'index.html#documentation'),
87 ProjectLink('API Reference TOC', 'index.html#api-reference'),
88 ProjectLink('Project Website', 'https://rastervision.io/'),
89 ProjectLink('PyPI releases', 'https://pypi.org/project/rastervision/'),
90 ProjectLink('GitHub', 'https://github.com/azavea/raster-vision'),
91 ProjectLink('Gitter Channel', 'https://gitter.im/azavea/raster-vision'),
92 ProjectLink('Raster Vision Examples', 'https://github.com/azavea/raster-vision-examples'),
93 ProjectLink('QGIS Plugin', 'https://github.com/azavea/raster-vision-qgis'),
94 ProjectLink('AWS Batch Setup', 'https://github.com/azavea/raster-vision-aws'),
95 ProjectLink('Issue Tracker', 'https://github.com/azavea/raster-vision/issues/'),
96 ProjectLink('CHANGELOG', 'changelog.html'),
97 ProjectLink('Azavea', 'https://www.azavea.com/'),
98 ],
99 'css_files': [
100 '_static/rastervision.css',
101 'https://media.readthedocs.org/css/badge_only.css'
102 ]
103 }
104 html_sidebars = {
105 'index': ['project.html', 'versions.html', 'searchbox.html'],
106 '**': ['project.html', 'localtoc.html', 'relations.html', 'versions.html', 'searchbox.html'],
107 }
108 singlehtml_sidebars = {'index': ['project.html', 'versions.html', 'localtoc.html']}
109 html_static_path = ['_static']
110 html_favicon = '_static/raster-vision-icon.png'
111 html_logo = '_static/raster-vision-logo.png'
112 html_title = 'Raster Vision Documentation ({})'.format(version)
113 html_show_sourcelink = False
114 html_domain_indices = False
115 html_experimental_html5_writer = True
116
117 # -- Options for HTMLHelp output ---------------------------------------------
118
119 # Output file base name for HTML help builder.
120 htmlhelp_basename = 'RasterVisiondoc'
121
122
123 # -- Options for LaTeX output ------------------------------------------------
124
125 latex_elements = {
126 # The paper size ('letterpaper' or 'a4paper').
127 #
128 # 'papersize': 'letterpaper',
129
130 # The font size ('10pt', '11pt' or '12pt').
131 #
132 # 'pointsize': '10pt',
133
134 # Additional stuff for the LaTeX preamble.
135 #
136 # 'preamble': '',
137
138 # Latex figure (float) alignment
139 #
140 # 'figure_align': 'htbp',
141 }
142
143 # Grouping the document tree into LaTeX files. List of tuples
144 # (source start file, target name, title,
145 # author, documentclass [howto, manual, or own class]).
146 latex_documents = [
147 (master_doc, 'RasterVision.tex', 'Raster Vision Documentation',
148 'Azavea', 'manual'),
149 ]
150
151
152 # -- Options for manual page output ------------------------------------------
153
154 # One entry per manual page. List of tuples
155 # (source start file, name, description, authors, manual section).
156 man_pages = [
157 (master_doc, 'RasterVisoin-{}.tex', html_title,
158 [author], 'manual')
159 ]
160
161
162 # -- Options for Texinfo output ----------------------------------------------
163
164 # Grouping the document tree into Texinfo files. List of tuples
165 # (source start file, target name, title, author,
166 # dir menu entry, description, category)
167 texinfo_documents = [
168 (master_doc, 'RasterVision', 'Raster Vision Documentation',
169 author, 'RasterVision', 'One line description of project.',
170 'Miscellaneous'),
171 ]
172
173
174 # -- Extension configuration -------------------------------------------------
175
176 programoutput_prompt_template = '> {command}\n{output}'
177
178 # -- Options for todo extension ----------------------------------------------
179
180 # If true, `todo` and `todoList` produce output, else they produce nothing.
181 todo_include_todos = True
182
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -91,7 +91,7 @@
ProjectLink('Gitter Channel', 'https://gitter.im/azavea/raster-vision'),
ProjectLink('Raster Vision Examples', 'https://github.com/azavea/raster-vision-examples'),
ProjectLink('QGIS Plugin', 'https://github.com/azavea/raster-vision-qgis'),
- ProjectLink('AWS Batch Setup', 'https://github.com/azavea/raster-vision-aws'),
+ ProjectLink('AWS Batch Setup', 'https://github.com/azavea/raster-vision-cloudformation'),
ProjectLink('Issue Tracker', 'https://github.com/azavea/raster-vision/issues/'),
ProjectLink('CHANGELOG', 'changelog.html'),
ProjectLink('Azavea', 'https://www.azavea.com/'),
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -91,7 +91,7 @@\n ProjectLink('Gitter Channel', 'https://gitter.im/azavea/raster-vision'),\n ProjectLink('Raster Vision Examples', 'https://github.com/azavea/raster-vision-examples'),\n ProjectLink('QGIS Plugin', 'https://github.com/azavea/raster-vision-qgis'),\n- ProjectLink('AWS Batch Setup', 'https://github.com/azavea/raster-vision-aws'),\n+ ProjectLink('AWS Batch Setup', 'https://github.com/azavea/raster-vision-cloudformation'),\n ProjectLink('Issue Tracker', 'https://github.com/azavea/raster-vision/issues/'),\n ProjectLink('CHANGELOG', 'changelog.html'),\n ProjectLink('Azavea', 'https://www.azavea.com/'),\n", "issue": "Update setup.rst to reference raster-vision-cloudformation\nThe setup instructions currently reference raster-vision-aws.\n", "before_files": [{"content": "from pallets_sphinx_themes import ProjectLink, get_version\n\n# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Raster Vision'\ncopyright = '2018, Azavea'\nauthor = 'Azavea'\n\n# The short X.Y version\nversion = '0.8'\n# The full version, including alpha/beta/rc tags\nrelease = '0.8.0'\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'pallets_sphinx_themes',\n 'sphinxcontrib.programoutput'\n]\n\nintersphinx_mapping = {'python': ('https://docs.python.org/3/', None)}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'README.md']\n\n# The name of the Pygments (syntax highlighting) style to use.\n# pygments_style = 'sphinx'\n\n# HTML -----------------------------------------------------------------\n\nhtml_theme = 'click'\nhtml_theme_options = {'index_sidebar_logo': False}\nhtml_context = {\n 'project_links': [\n ProjectLink('Quickstart', 'quickstart.html'),\n ProjectLink('Documentation TOC', 'index.html#documentation'),\n ProjectLink('API Reference TOC', 'index.html#api-reference'),\n ProjectLink('Project Website', 'https://rastervision.io/'),\n ProjectLink('PyPI releases', 'https://pypi.org/project/rastervision/'),\n ProjectLink('GitHub', 'https://github.com/azavea/raster-vision'),\n ProjectLink('Gitter Channel', 'https://gitter.im/azavea/raster-vision'),\n ProjectLink('Raster Vision Examples', 'https://github.com/azavea/raster-vision-examples'),\n ProjectLink('QGIS Plugin', 'https://github.com/azavea/raster-vision-qgis'),\n ProjectLink('AWS Batch Setup', 'https://github.com/azavea/raster-vision-aws'),\n ProjectLink('Issue Tracker', 'https://github.com/azavea/raster-vision/issues/'),\n ProjectLink('CHANGELOG', 'changelog.html'),\n ProjectLink('Azavea', 'https://www.azavea.com/'),\n ],\n 'css_files': [\n '_static/rastervision.css',\n 'https://media.readthedocs.org/css/badge_only.css'\n ]\n}\nhtml_sidebars = {\n 'index': ['project.html', 'versions.html', 'searchbox.html'],\n '**': ['project.html', 'localtoc.html', 'relations.html', 'versions.html', 'searchbox.html'],\n}\nsinglehtml_sidebars = {'index': ['project.html', 'versions.html', 'localtoc.html']}\nhtml_static_path = ['_static']\nhtml_favicon = '_static/raster-vision-icon.png'\nhtml_logo = '_static/raster-vision-logo.png'\nhtml_title = 'Raster Vision Documentation ({})'.format(version)\nhtml_show_sourcelink = False\nhtml_domain_indices = False\nhtml_experimental_html5_writer = True\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'RasterVisiondoc'\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'RasterVision.tex', 'Raster Vision Documentation',\n 'Azavea', 'manual'),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. 
List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'RasterVisoin-{}.tex', html_title,\n [author], 'manual')\n]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'RasterVision', 'Raster Vision Documentation',\n author, 'RasterVision', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\nprogramoutput_prompt_template = '> {command}\\n{output}'\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n", "path": "docs/conf.py"}]}
| 2,451 | 211 |
gh_patches_debug_3484
|
rasdani/github-patches
|
git_diff
|
dotkom__onlineweb4-2115
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
It's not possible to delete event relations in django admin
## What kind of an issue is this?
- [x] Bug report
## What is the expected behaviour?
When you mark an event relation for deletion it should be deleted when you save the object.
## What is the current behaviour?
The object is saved, but the relation is not removed.
## How do you reproduce this problem?
* Create an event
* Add a relation (e.g. Company or Attendance)
* Save
* Go to the admin panel for the event
* Mark the relation for deletion (click the delete checkbox by the relation)
* Save
* The relation is still there
## Other information
This is most likely due to us overriding the `save_formset()` method in EventAdmin[0] and not taking care of deleting objects marked for deletion in the formset [1].
We're not doing anything custom in the method, so there's really no reason to override it. The code could actually be removed.
[0]: https://github.com/dotkom/onlineweb4/blob/develop/apps/events/admin.py#L160
[1]: https://docs.djangoproject.com/en/1.11/ref/contrib/admin/#django.contrib.admin.ModelAdmin.save_formset
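
For context, Django's stock `ModelAdmin.save_formset()` simply calls `formset.save()`, which both saves changed inline objects and deletes the ones whose delete checkbox was ticked, so dropping the override (or handling `formset.deleted_objects` explicitly) resolves this. A minimal sketch, using a trimmed-down `EventAdmin` purely for illustration:

```python
from django.contrib import admin

class EventAdmin(admin.ModelAdmin):
    # Django's documented default behaviour: formset.save() persists new and
    # changed inline objects *and* deletes the ones marked for deletion.
    def save_formset(self, request, form, formset, change):
        formset.save()

    # If a commit=False pass were ever needed again, deletions would have to be
    # handled explicitly, roughly like this:
    #   instances = formset.save(commit=False)
    #   for obj in formset.deleted_objects:
    #       obj.delete()
    #   for instance in instances:
    #       instance.save()
    #   formset.save_m2m()
```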
</issue>
<code>
[start of apps/events/admin.py]
1 # -*- coding: utf-8 -*-
2
3 from django.contrib import admin, messages
4 from django.utils.translation import ugettext as _
5 from guardian.admin import GuardedModelAdmin
6 from reversion.admin import VersionAdmin
7
8 from apps.events.models import (AttendanceEvent, Attendee, CompanyEvent, Event, Extras,
9 FieldOfStudyRule, GradeRule, GroupRestriction, Reservation,
10 Reservee, RuleBundle, UserGroupRule)
11 from apps.feedback.admin import FeedbackRelationInline
12
13
14 class AttendeeInline(admin.TabularInline):
15 model = Attendee
16 extra = 1
17 classes = ('grp-collapse grp-open',) # style
18 inline_classes = ('grp-collapse grp-open',) # style
19
20
21 class CompanyInline(admin.TabularInline):
22 model = CompanyEvent
23 max_num = 20
24 extra = 0
25 classes = ('grp-collapse grp-open',) # style
26 inline_classes = ('grp-collapse grp-open',) # style
27
28
29 class RuleBundleInline(admin.TabularInline):
30 model = RuleBundle
31 extra = 1
32 max_num = 20
33 classes = ('grp-collapse grp-open',) # style
34 inline_classes = ('grp-collapse grp-open',) # style
35
36
37 class ExtrasInline(admin.TabularInline):
38 model = Extras
39 extra = 1
40 max_num = 20
41 classes = ('grp-collapse grp-open',) # style
42 inline_classes = ('grp-collapse grp-open',) # style
43
44
45 class GroupRestrictionInline(admin.TabularInline):
46 model = GroupRestriction
47 extra = 0
48 max_num = 1
49 classes = ('grp-collapse grp-open',) # style
50 inline_classes = ('grp-collapse grp-open',) # style
51 filter_horizontal = ('groups',)
52
53
54 def mark_paid(modeladmin, request, queryset):
55 queryset.update(paid=True)
56
57
58 mark_paid.short_description = "Merk som betalt"
59
60
61 def mark_not_paid(modeladmin, request, queryset):
62 queryset.update(paid=False)
63
64
65 mark_not_paid.short_description = "Merk som ikke betalt"
66
67
68 def mark_attended(modeladmin, request, queryset):
69 queryset.update(attended=True)
70
71
72 mark_attended.short_description = "Merk som møtt"
73
74
75 def mark_not_attended(modeladmin, request, queryset):
76 queryset.update(attended=False)
77
78
79 mark_not_attended.short_description = "Merk som ikke møtt"
80
81
82 class AttendeeAdmin(GuardedModelAdmin, VersionAdmin):
83 model = Attendee
84 ordering = ['-timestamp']
85 list_display = ('user', 'event', 'timestamp', 'paid', 'attended', 'note', 'extras')
86 list_filter = ('attended', 'paid', 'event__event')
87 search_fields = (
88 'event__event__title', '=event__event__id', 'user__first_name', 'user__last_name', 'user__username',
89 )
90 actions = [mark_paid, mark_attended, mark_not_paid, mark_not_attended]
91 group_owned_objects_field = 'event__event__organizer'
92 user_can_access_owned_by_group_objects_only = True
93
94 # Disable delete_selected http://bit.ly/1o4nleN
95 def get_actions(self, request):
96 actions = super(AttendeeAdmin, self).get_actions(request)
97 if 'delete_selected' in actions:
98 del actions['delete_selected']
99 return actions
100
101
102 class CompanyEventAdmin(VersionAdmin):
103 model = CompanyEvent
104 inlines = (CompanyInline,)
105
106
107 class ExtrasAdmin(VersionAdmin):
108 model = Extras
109 fk_name = 'choice'
110 # inlines = (ExtrasInline,)
111
112
113 class RuleBundleAdmin(VersionAdmin):
114 model = RuleBundle
115
116
117 class FieldOfStudyRuleAdmin(VersionAdmin):
118 model = FieldOfStudyRule
119
120
121 class GradeRuleAdmin(VersionAdmin):
122 model = GradeRule
123
124
125 class UserGroupRuleAdmin(VersionAdmin):
126 model = UserGroupRule
127
128
129 class AttendanceEventInline(admin.StackedInline):
130 model = AttendanceEvent
131 max_num = 1
132 extra = 0
133 filter_horizontal = ('rule_bundles',)
134 classes = ('grp-collapse grp-open',) # style
135 inline_classes = ('grp-collapse grp-open',) # style
136 exclude = ("marks_has_been_set",)
137
138
139 class EventAdmin(GuardedModelAdmin, VersionAdmin):
140 inlines = (AttendanceEventInline, FeedbackRelationInline, CompanyInline, GroupRestrictionInline)
141 exclude = ("author", )
142 list_display = ['__str__', 'event_type', 'organizer']
143 list_filter = ['event_type', 'organizer']
144 search_fields = ('title',)
145
146 group_owned_objects_field = 'organizer'
147 user_can_access_owned_by_group_objects_only = True
148
149 def save_model(self, request, obj, form, change):
150 if not change: # created
151 obj.author = request.user
152 obj.save()
153
154 def save_formset(self, request, form, formset, change):
155 instances = formset.save(commit=False)
156 for instance in instances:
157 instance.save()
158 formset.save_m2m()
159
160
161 class ReserveeInline(admin.TabularInline):
162 model = Reservee
163 extra = 1
164 classes = ('grp-collapse grp-open',) # style
165 inline_classes = ('grp-collapse grp-open',) # style
166
167
168 class ReservationAdmin(GuardedModelAdmin, VersionAdmin):
169 model = Reservation
170 inlines = (ReserveeInline,)
171 max_num = 1
172 extra = 0
173 list_display = ('attendance_event', '_number_of_seats_taken', 'seats', '_attendees', '_max_capacity')
174 classes = ('grp-collapse grp-open',) # style
175 inline_classes = ('grp-collapse grp-open',) # style
176 user_can_access_owned_by_group_objects_only = True
177 group_owned_objects_field = 'attendance_event__event__organizer'
178
179 def _number_of_seats_taken(self, obj):
180 return obj.number_of_seats_taken
181 _number_of_seats_taken.short_description = _("Fylte reservasjoner")
182
183 def _attendees(self, obj):
184 return obj.attendance_event.number_of_attendees
185 _attendees.short_description = _("Antall deltakere")
186
187 def _max_capacity(self, obj):
188 return obj.attendance_event.max_capacity
189 _max_capacity.short_description = _("Arrangementets maks-kapasitet")
190
191 def save_model(self, request, obj, form, change):
192 attendance_event = AttendanceEvent.objects.get(pk=obj.attendance_event.event)
193 number_of_free_seats = attendance_event.max_capacity - attendance_event.number_of_attendees
194 if number_of_free_seats < obj.seats:
195 obj.seats = number_of_free_seats
196 self.message_user(request, _(
197 "Du har valgt et antall reserverte plasser som overskrider antallet ledige plasser for dette "
198 "arrangementet. Antallet ble automatisk justert til %d (alle ledige plasser)."
199 ) % number_of_free_seats, messages.WARNING)
200 obj.save()
201
202
203 admin.site.register(Event, EventAdmin)
204 admin.site.register(Attendee, AttendeeAdmin)
205 admin.site.register(RuleBundle, RuleBundleAdmin)
206 admin.site.register(Extras, ExtrasAdmin)
207 admin.site.register(GradeRule, GradeRuleAdmin)
208 admin.site.register(UserGroupRule, UserGroupRuleAdmin)
209 admin.site.register(FieldOfStudyRule, FieldOfStudyRuleAdmin)
210 admin.site.register(Reservation, ReservationAdmin)
211
[end of apps/events/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/apps/events/admin.py b/apps/events/admin.py
--- a/apps/events/admin.py
+++ b/apps/events/admin.py
@@ -151,12 +151,6 @@
obj.author = request.user
obj.save()
- def save_formset(self, request, form, formset, change):
- instances = formset.save(commit=False)
- for instance in instances:
- instance.save()
- formset.save_m2m()
-
class ReserveeInline(admin.TabularInline):
model = Reservee
|
{"golden_diff": "diff --git a/apps/events/admin.py b/apps/events/admin.py\n--- a/apps/events/admin.py\n+++ b/apps/events/admin.py\n@@ -151,12 +151,6 @@\n obj.author = request.user\n obj.save()\n \n- def save_formset(self, request, form, formset, change):\n- instances = formset.save(commit=False)\n- for instance in instances:\n- instance.save()\n- formset.save_m2m()\n-\n \n class ReserveeInline(admin.TabularInline):\n model = Reservee\n", "issue": "It's not possible to delete event relations in django admin\n## What kind of an issue is this?\r\n\r\n- [x] Bug report\r\n\r\n\r\n## What is the expected behaviour?\r\n\r\nWhen you mark an event relation for deletion it should be deleted when you save the object.\r\n\r\n## What is the current behaviour?\r\n\r\nThe object is saved, but the relation is not removed.\r\n\r\n\r\n## How do you reproduce this problem? \r\n\r\n* Create an event\r\n* Add a relation (e.g. Company or Attendance)\r\n* Save\r\n* Go to the admin panel for the event\r\n* Mark the relation for deletion (click the delete checkbox by the relation)\r\n* Save\r\n* The relation is still there\r\n\r\n\r\n## Other information\r\n\r\nThis is most likely due to us overwriting the `save_formset()` method in EventAdmin[0] and not taking care of deleting objects marked for deleting in the formset [1].\r\n\r\nWe're not doing anything custom in the method, so there's really no reason to override it. The code could actually be removed.\r\n\r\n[0]: https://github.com/dotkom/onlineweb4/blob/develop/apps/events/admin.py#L160\r\n[1]: https://docs.djangoproject.com/en/1.11/ref/contrib/admin/#django.contrib.admin.ModelAdmin.save_formset\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django.contrib import admin, messages\nfrom django.utils.translation import ugettext as _\nfrom guardian.admin import GuardedModelAdmin\nfrom reversion.admin import VersionAdmin\n\nfrom apps.events.models import (AttendanceEvent, Attendee, CompanyEvent, Event, Extras,\n FieldOfStudyRule, GradeRule, GroupRestriction, Reservation,\n Reservee, RuleBundle, UserGroupRule)\nfrom apps.feedback.admin import FeedbackRelationInline\n\n\nclass AttendeeInline(admin.TabularInline):\n model = Attendee\n extra = 1\n classes = ('grp-collapse grp-open',) # style\n inline_classes = ('grp-collapse grp-open',) # style\n\n\nclass CompanyInline(admin.TabularInline):\n model = CompanyEvent\n max_num = 20\n extra = 0\n classes = ('grp-collapse grp-open',) # style\n inline_classes = ('grp-collapse grp-open',) # style\n\n\nclass RuleBundleInline(admin.TabularInline):\n model = RuleBundle\n extra = 1\n max_num = 20\n classes = ('grp-collapse grp-open',) # style\n inline_classes = ('grp-collapse grp-open',) # style\n\n\nclass ExtrasInline(admin.TabularInline):\n model = Extras\n extra = 1\n max_num = 20\n classes = ('grp-collapse grp-open',) # style\n inline_classes = ('grp-collapse grp-open',) # style\n\n\nclass GroupRestrictionInline(admin.TabularInline):\n model = GroupRestriction\n extra = 0\n max_num = 1\n classes = ('grp-collapse grp-open',) # style\n inline_classes = ('grp-collapse grp-open',) # style\n filter_horizontal = ('groups',)\n\n\ndef mark_paid(modeladmin, request, queryset):\n queryset.update(paid=True)\n\n\nmark_paid.short_description = \"Merk som betalt\"\n\n\ndef mark_not_paid(modeladmin, request, queryset):\n queryset.update(paid=False)\n\n\nmark_not_paid.short_description = \"Merk som ikke betalt\"\n\n\ndef mark_attended(modeladmin, request, queryset):\n 
queryset.update(attended=True)\n\n\nmark_attended.short_description = \"Merk som m\u00f8tt\"\n\n\ndef mark_not_attended(modeladmin, request, queryset):\n queryset.update(attended=False)\n\n\nmark_not_attended.short_description = \"Merk som ikke m\u00f8tt\"\n\n\nclass AttendeeAdmin(GuardedModelAdmin, VersionAdmin):\n model = Attendee\n ordering = ['-timestamp']\n list_display = ('user', 'event', 'timestamp', 'paid', 'attended', 'note', 'extras')\n list_filter = ('attended', 'paid', 'event__event')\n search_fields = (\n 'event__event__title', '=event__event__id', 'user__first_name', 'user__last_name', 'user__username',\n )\n actions = [mark_paid, mark_attended, mark_not_paid, mark_not_attended]\n group_owned_objects_field = 'event__event__organizer'\n user_can_access_owned_by_group_objects_only = True\n\n # Disable delete_selected http://bit.ly/1o4nleN\n def get_actions(self, request):\n actions = super(AttendeeAdmin, self).get_actions(request)\n if 'delete_selected' in actions:\n del actions['delete_selected']\n return actions\n\n\nclass CompanyEventAdmin(VersionAdmin):\n model = CompanyEvent\n inlines = (CompanyInline,)\n\n\nclass ExtrasAdmin(VersionAdmin):\n model = Extras\n fk_name = 'choice'\n # inlines = (ExtrasInline,)\n\n\nclass RuleBundleAdmin(VersionAdmin):\n model = RuleBundle\n\n\nclass FieldOfStudyRuleAdmin(VersionAdmin):\n model = FieldOfStudyRule\n\n\nclass GradeRuleAdmin(VersionAdmin):\n model = GradeRule\n\n\nclass UserGroupRuleAdmin(VersionAdmin):\n model = UserGroupRule\n\n\nclass AttendanceEventInline(admin.StackedInline):\n model = AttendanceEvent\n max_num = 1\n extra = 0\n filter_horizontal = ('rule_bundles',)\n classes = ('grp-collapse grp-open',) # style\n inline_classes = ('grp-collapse grp-open',) # style\n exclude = (\"marks_has_been_set\",)\n\n\nclass EventAdmin(GuardedModelAdmin, VersionAdmin):\n inlines = (AttendanceEventInline, FeedbackRelationInline, CompanyInline, GroupRestrictionInline)\n exclude = (\"author\", )\n list_display = ['__str__', 'event_type', 'organizer']\n list_filter = ['event_type', 'organizer']\n search_fields = ('title',)\n\n group_owned_objects_field = 'organizer'\n user_can_access_owned_by_group_objects_only = True\n\n def save_model(self, request, obj, form, change):\n if not change: # created\n obj.author = request.user\n obj.save()\n\n def save_formset(self, request, form, formset, change):\n instances = formset.save(commit=False)\n for instance in instances:\n instance.save()\n formset.save_m2m()\n\n\nclass ReserveeInline(admin.TabularInline):\n model = Reservee\n extra = 1\n classes = ('grp-collapse grp-open',) # style\n inline_classes = ('grp-collapse grp-open',) # style\n\n\nclass ReservationAdmin(GuardedModelAdmin, VersionAdmin):\n model = Reservation\n inlines = (ReserveeInline,)\n max_num = 1\n extra = 0\n list_display = ('attendance_event', '_number_of_seats_taken', 'seats', '_attendees', '_max_capacity')\n classes = ('grp-collapse grp-open',) # style\n inline_classes = ('grp-collapse grp-open',) # style\n user_can_access_owned_by_group_objects_only = True\n group_owned_objects_field = 'attendance_event__event__organizer'\n\n def _number_of_seats_taken(self, obj):\n return obj.number_of_seats_taken\n _number_of_seats_taken.short_description = _(\"Fylte reservasjoner\")\n\n def _attendees(self, obj):\n return obj.attendance_event.number_of_attendees\n _attendees.short_description = _(\"Antall deltakere\")\n\n def _max_capacity(self, obj):\n return obj.attendance_event.max_capacity\n _max_capacity.short_description = 
_(\"Arrangementets maks-kapasitet\")\n\n def save_model(self, request, obj, form, change):\n attendance_event = AttendanceEvent.objects.get(pk=obj.attendance_event.event)\n number_of_free_seats = attendance_event.max_capacity - attendance_event.number_of_attendees\n if number_of_free_seats < obj.seats:\n obj.seats = number_of_free_seats\n self.message_user(request, _(\n \"Du har valgt et antall reserverte plasser som overskrider antallet ledige plasser for dette \"\n \"arrangementet. Antallet ble automatisk justert til %d (alle ledige plasser).\"\n ) % number_of_free_seats, messages.WARNING)\n obj.save()\n\n\nadmin.site.register(Event, EventAdmin)\nadmin.site.register(Attendee, AttendeeAdmin)\nadmin.site.register(RuleBundle, RuleBundleAdmin)\nadmin.site.register(Extras, ExtrasAdmin)\nadmin.site.register(GradeRule, GradeRuleAdmin)\nadmin.site.register(UserGroupRule, UserGroupRuleAdmin)\nadmin.site.register(FieldOfStudyRule, FieldOfStudyRuleAdmin)\nadmin.site.register(Reservation, ReservationAdmin)\n", "path": "apps/events/admin.py"}]}
| 2,983 | 120 |
gh_patches_debug_1725
|
rasdani/github-patches
|
git_diff
|
HypothesisWorks__hypothesis-1350
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Python deployment did not remove RELEASE.rst
In c216ddca8155cdb05053bcfe2ac51814447aac4f, which blocks further merges. @DRMacIver, I think cb84c3ad4dfdecfcf947ccd24bfdee111083645b shouldn't have taken out the `git rm RELEASE.rst`...
AFAICT ea6bb819143d5c6b4e4c43aa2436a9f70d129872 would be a decent fix. (and is now in all my open PRs)
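
A sketch of the kind of fix being pointed at — restoring the removal of the release-notes file inside `commit_pending_release()` so the bump commit also drops it (assuming the project object exposes a `RELEASE_FILE` path, as the rest of the tooling suggests):

```python
import hypothesistooling as tools

def commit_pending_release(project):
    """Create a commit with the new release."""
    # Drop the pending release notes so they can't trigger another release,
    # then stage the version/changelog updates and commit.
    tools.git('rm', project.RELEASE_FILE)
    tools.git('add', '-u', project.BASE_DIR)
    tools.git(
        'commit', '-m',
        'Bump %s version to %s and update changelog'
        '\n\n[skip ci]' % (project.PACKAGE_NAME, project.current_version(),)
    )
```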
</issue>
<code>
[start of tooling/src/hypothesistooling/releasemanagement.py]
1 # coding=utf-8
2 #
3 # This file is part of Hypothesis, which may be found at
4 # https://github.com/HypothesisWorks/hypothesis-python
5 #
6 # Most of this work is copyright (C) 2013-2018 David R. MacIver
7 # ([email protected]), but it contains contributions by others. See
8 # CONTRIBUTING.rst for a full list of people who may hold copyright, and
9 # consult the git log if you need to determine who owns an individual
10 # contribution.
11 #
12 # This Source Code Form is subject to the terms of the Mozilla Public License,
13 # v. 2.0. If a copy of the MPL was not distributed with this file, You can
14 # obtain one at http://mozilla.org/MPL/2.0/.
15 #
16 # END HEADER
17
18 """Helpful common code for release management tasks that is shared across
19 multiple projects.
20
21 Note that most code in here is brittle and specific to our build and
22 probably makes all sorts of undocumented assumptions, even as it looks
23 like a nice tidy reusable set of functionality.
24 """
25
26
27 from __future__ import division, print_function, absolute_import
28
29 import re
30 from datetime import datetime, timedelta
31
32 import hypothesistooling as tools
33
34
35 def release_date_string():
36 """Returns a date string that represents what should be considered "today"
37 for the purposes of releasing. It is always measured in UTC, but if it's in
38 the last hour of the day it will actually be considered tomorrow.
39
40 The reason for counting it as the later day is that it ensures that
41 (unless our release process takes more than 23 hours) this value
42 remains consistent throughout the entire release.
43 """
44 now = datetime.utcnow()
45
46 return max([
47 d.strftime('%Y-%m-%d') for d in (now, now + timedelta(hours=1))
48 ])
49
50
51 def assignment_matcher(name):
52 """
53 Matches a single line of the form (some space)name = (some value). e.g.
54 " foo = 1".
55 The whole line up to the assigned value is the first matching group,
56 the rest of the line is the second matching group.
57 i.e. group 1 is the assignment, group 2 is the value. In the above
58 example group 1 would be " foo = " and group 2 would be "1"
59 """
60 return re.compile(r'\A(\s*%s\s*=\s*)(.+)\Z' % (re.escape(name),))
61
62
63 def extract_assignment_from_string(contents, name):
64 lines = contents.split('\n')
65
66 matcher = assignment_matcher(name)
67
68 for i, l in enumerate(lines):
69 match = matcher.match(l)
70 if match is not None:
71 return match[2].strip()
72
73 raise ValueError('Key %s not found in %s' % (
74 name, contents
75 ))
76
77
78 def extract_assignment(filename, name):
79 with open(filename) as i:
80 return extract_assignment_from_string(i.read(), name)
81
82
83 def replace_assignment_in_string(contents, name, value):
84 lines = contents.split('\n')
85
86 matcher = assignment_matcher(name)
87
88 count = 0
89
90 for i, l in enumerate(lines):
91 match = matcher.match(l)
92 if match is not None:
93 count += 1
94 lines[i] = match[1] + value
95
96 if count == 0:
97 raise ValueError('Key %s not found in %s' % (
98 name, contents
99 ))
100 if count > 1:
101 raise ValueError('Key %s found %d times in %s' % (
102 name, count, contents
103 ))
104
105 return '\n'.join(lines)
106
107
108 def replace_assignment(filename, name, value):
109 """Replaces a single assignment of the form key = value in a file with a
110 new value, attempting to preserve the existing format.
111
112 This is fairly fragile - in particular it knows nothing about
113 the file format. The existing value is simply the rest of the line after
114 the last space after the equals.
115 """
116 with open(filename) as i:
117 contents = i.read()
118 result = replace_assignment_in_string(contents, name, value)
119 with open(filename, 'w') as o:
120 o.write(result)
121
122
123 RELEASE_TYPE = re.compile(r"^RELEASE_TYPE: +(major|minor|patch)")
124
125
126 MAJOR = 'major'
127 MINOR = 'minor'
128 PATCH = 'patch'
129
130
131 VALID_RELEASE_TYPES = (MAJOR, MINOR, PATCH)
132
133
134 def parse_release_file(filename):
135 with open(filename) as i:
136 return parse_release_file_contents(i.read(), filename)
137
138
139 def parse_release_file_contents(release_contents, filename):
140 release_lines = release_contents.split('\n')
141
142 m = RELEASE_TYPE.match(release_lines[0])
143 if m is not None:
144 release_type = m.group(1)
145 if release_type not in VALID_RELEASE_TYPES:
146 raise ValueError('Unrecognised release type %r' % (release_type,))
147 del release_lines[0]
148 release_contents = '\n'.join(release_lines).strip()
149 else:
150 raise ValueError(
151 '%s does not start by specifying release type. The first '
152 'line of the file should be RELEASE_TYPE: followed by one of '
153 'major, minor, or patch, to specify the type of release that '
154 'this is (i.e. which version number to increment). Instead the '
155 'first line was %r' % (filename, release_lines[0],)
156 )
157
158 return release_type, release_contents
159
160
161 def bump_version_info(version_info, release_type):
162 new_version = list(version_info)
163 bump = VALID_RELEASE_TYPES.index(release_type)
164 new_version[bump] += 1
165 for i in range(bump + 1, len(new_version)):
166 new_version[i] = 0
167 new_version = tuple(new_version)
168 new_version_string = '.'.join(map(str, new_version))
169 return new_version_string, new_version
170
171
172 def update_markdown_changelog(changelog, name, version, entry):
173 with open(changelog) as i:
174 prev_contents = i.read()
175
176 title = '# %(name)s %(version)s (%(date)s)\n\n' % {
177 'name': name, 'version': version, 'date': release_date_string(),
178 }
179
180 with open(changelog, 'w') as o:
181 o.write(title)
182 o.write(entry.strip())
183 o.write('\n\n')
184 o.write(prev_contents)
185
186
187 def parse_version(version):
188 return tuple(map(int, version.split('.')))
189
190
191 def commit_pending_release(project):
192 """Create a commit with the new release."""
193 tools.git('add', '-u', project.BASE_DIR)
194
195 tools.git(
196 'commit', '-m',
197 'Bump %s version to %s and update changelog'
198 '\n\n[skip ci]' % (project.PACKAGE_NAME, project.current_version(),)
199 )
200
[end of tooling/src/hypothesistooling/releasemanagement.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/tooling/src/hypothesistooling/releasemanagement.py b/tooling/src/hypothesistooling/releasemanagement.py
--- a/tooling/src/hypothesistooling/releasemanagement.py
+++ b/tooling/src/hypothesistooling/releasemanagement.py
@@ -190,6 +190,7 @@
def commit_pending_release(project):
"""Create a commit with the new release."""
+ tools.git('rm', project.RELEASE_FILE)
tools.git('add', '-u', project.BASE_DIR)
tools.git(
|
{"golden_diff": "diff --git a/tooling/src/hypothesistooling/releasemanagement.py b/tooling/src/hypothesistooling/releasemanagement.py\n--- a/tooling/src/hypothesistooling/releasemanagement.py\n+++ b/tooling/src/hypothesistooling/releasemanagement.py\n@@ -190,6 +190,7 @@\n \n def commit_pending_release(project):\n \"\"\"Create a commit with the new release.\"\"\"\n+ tools.git('rm', project.RELEASE_FILE)\n tools.git('add', '-u', project.BASE_DIR)\n \n tools.git(\n", "issue": "Python deployment did not remove RELEASE.rst\nIn c216ddca8155cdb05053bcfe2ac51814447aac4f, which blocks further merges. @DRMacIver, I think cb84c3ad4dfdecfcf947ccd24bfdee111083645b shouldn't have taken out the `git rm RELEASE.rst`... \r\n\r\nAFAICT ea6bb819143d5c6b4e4c43aa2436a9f70d129872 would be a decent fix. (and is now in all my open PRs)\n", "before_files": [{"content": "# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2018 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\n\"\"\"Helpful common code for release management tasks that is shared across\nmultiple projects.\n\nNote that most code in here is brittle and specific to our build and\nprobably makes all sorts of undocumented assumptions, even as it looks\nlike a nice tidy reusable set of functionality.\n\"\"\"\n\n\nfrom __future__ import division, print_function, absolute_import\n\nimport re\nfrom datetime import datetime, timedelta\n\nimport hypothesistooling as tools\n\n\ndef release_date_string():\n \"\"\"Returns a date string that represents what should be considered \"today\"\n for the purposes of releasing. It is always measured in UTC, but if it's in\n the last hour of the day it will actually be considered tomorrow.\n\n The reason for counting it as the later day is that it ensures that\n (unless our release process takes more than 23 hours) this value\n remains consistent throughout the entire release.\n \"\"\"\n now = datetime.utcnow()\n\n return max([\n d.strftime('%Y-%m-%d') for d in (now, now + timedelta(hours=1))\n ])\n\n\ndef assignment_matcher(name):\n \"\"\"\n Matches a single line of the form (some space)name = (some value). e.g.\n \" foo = 1\".\n The whole line up to the assigned value is the first matching group,\n the rest of the line is the second matching group.\n i.e. group 1 is the assignment, group 2 is the value. 
In the above\n example group 1 would be \" foo = \" and group 2 would be \"1\"\n \"\"\"\n return re.compile(r'\\A(\\s*%s\\s*=\\s*)(.+)\\Z' % (re.escape(name),))\n\n\ndef extract_assignment_from_string(contents, name):\n lines = contents.split('\\n')\n\n matcher = assignment_matcher(name)\n\n for i, l in enumerate(lines):\n match = matcher.match(l)\n if match is not None:\n return match[2].strip()\n\n raise ValueError('Key %s not found in %s' % (\n name, contents\n ))\n\n\ndef extract_assignment(filename, name):\n with open(filename) as i:\n return extract_assignment_from_string(i.read(), name)\n\n\ndef replace_assignment_in_string(contents, name, value):\n lines = contents.split('\\n')\n\n matcher = assignment_matcher(name)\n\n count = 0\n\n for i, l in enumerate(lines):\n match = matcher.match(l)\n if match is not None:\n count += 1\n lines[i] = match[1] + value\n\n if count == 0:\n raise ValueError('Key %s not found in %s' % (\n name, contents\n ))\n if count > 1:\n raise ValueError('Key %s found %d times in %s' % (\n name, count, contents\n ))\n\n return '\\n'.join(lines)\n\n\ndef replace_assignment(filename, name, value):\n \"\"\"Replaces a single assignment of the form key = value in a file with a\n new value, attempting to preserve the existing format.\n\n This is fairly fragile - in particular it knows nothing about\n the file format. The existing value is simply the rest of the line after\n the last space after the equals.\n \"\"\"\n with open(filename) as i:\n contents = i.read()\n result = replace_assignment_in_string(contents, name, value)\n with open(filename, 'w') as o:\n o.write(result)\n\n\nRELEASE_TYPE = re.compile(r\"^RELEASE_TYPE: +(major|minor|patch)\")\n\n\nMAJOR = 'major'\nMINOR = 'minor'\nPATCH = 'patch'\n\n\nVALID_RELEASE_TYPES = (MAJOR, MINOR, PATCH)\n\n\ndef parse_release_file(filename):\n with open(filename) as i:\n return parse_release_file_contents(i.read(), filename)\n\n\ndef parse_release_file_contents(release_contents, filename):\n release_lines = release_contents.split('\\n')\n\n m = RELEASE_TYPE.match(release_lines[0])\n if m is not None:\n release_type = m.group(1)\n if release_type not in VALID_RELEASE_TYPES:\n raise ValueError('Unrecognised release type %r' % (release_type,))\n del release_lines[0]\n release_contents = '\\n'.join(release_lines).strip()\n else:\n raise ValueError(\n '%s does not start by specifying release type. The first '\n 'line of the file should be RELEASE_TYPE: followed by one of '\n 'major, minor, or patch, to specify the type of release that '\n 'this is (i.e. which version number to increment). 
Instead the '\n 'first line was %r' % (filename, release_lines[0],)\n )\n\n return release_type, release_contents\n\n\ndef bump_version_info(version_info, release_type):\n new_version = list(version_info)\n bump = VALID_RELEASE_TYPES.index(release_type)\n new_version[bump] += 1\n for i in range(bump + 1, len(new_version)):\n new_version[i] = 0\n new_version = tuple(new_version)\n new_version_string = '.'.join(map(str, new_version))\n return new_version_string, new_version\n\n\ndef update_markdown_changelog(changelog, name, version, entry):\n with open(changelog) as i:\n prev_contents = i.read()\n\n title = '# %(name)s %(version)s (%(date)s)\\n\\n' % {\n 'name': name, 'version': version, 'date': release_date_string(),\n }\n\n with open(changelog, 'w') as o:\n o.write(title)\n o.write(entry.strip())\n o.write('\\n\\n')\n o.write(prev_contents)\n\n\ndef parse_version(version):\n return tuple(map(int, version.split('.')))\n\n\ndef commit_pending_release(project):\n \"\"\"Create a commit with the new release.\"\"\"\n tools.git('add', '-u', project.BASE_DIR)\n\n tools.git(\n 'commit', '-m',\n 'Bump %s version to %s and update changelog'\n '\\n\\n[skip ci]' % (project.PACKAGE_NAME, project.current_version(),)\n )\n", "path": "tooling/src/hypothesistooling/releasemanagement.py"}]}
| 2,725 | 133 |
gh_patches_debug_34358
|
rasdani/github-patches
|
git_diff
|
sopel-irc__sopel-1404
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Option to specify alternate config directory on first run
When the bot is first run, it executes ```sopel/run_script.py``` at startup.
Said script is hardcoded to create ```~/.sopel``` if it doesn't already exist.
Should there be an option to allow the user to specify an alternate directory to use?
This was observed on d9cfb41 running ```python sopel.py``` without prior configuration.
</issue>
<code>
[start of sopel/config/__init__.py]
1 # coding=utf-8
2 """
3 The config object provides a simplified interface to access Sopel's configuration file.
4 The sections of the file are attributes of the object, and the keys in the
5 section are attributes of that. So, for example, the ``eggs`` attribute in the
6 ``[spam]`` section can be accessed from ``config.spam.eggs``.
7
8 Section definitions (see "Section configuration sections" below) can be added
9 to the config object with ``define_section``. When this is done, only the
10 defined keys will be available. A section can not be given more than one
11 definition. The ``[core]`` section is defined with ``CoreSection`` when the
12 object is initialized.
13
14 .. versionadded:: 6.0.0
15 """
16 # Copyright 2012-2015, Elsie Powell, embolalia.com
17 # Copyright © 2012, Elad Alfassa <[email protected]>
18 # Licensed under the Eiffel Forum License 2.
19
20 from __future__ import unicode_literals, absolute_import, print_function, division
21
22 from sopel.tools import iteritems, stderr
23 import sopel.tools
24 from sopel.tools import get_input
25 import sopel.loader
26 import os
27 import sys
28 if sys.version_info.major < 3:
29 import ConfigParser
30 else:
31 basestring = str
32 import configparser as ConfigParser
33 import sopel.config.core_section
34 from sopel.config.types import StaticSection
35
36
37 DEFAULT_HOMEDIR = os.path.join(os.path.expanduser('~'), '.sopel')
38
39
40 class ConfigurationError(Exception):
41 """ Exception type for configuration errors """
42
43 def __init__(self, value):
44 self.value = value
45
46 def __str__(self):
47 return 'ConfigurationError: %s' % self.value
48
49
50 class Config(object):
51 def __init__(self, filename, validate=True):
52 """The bot's configuration.
53
54 The given filename will be associated with the configuration, and is
55 the file which will be written if write() is called. If load is not
56 given or True, the configuration object will load the attributes from
57 the file at filename.
58
59 A few default values will be set here if they are not defined in the
60 config file, or a config file is not loaded. They are documented below.
61 """
62 self.filename = filename
63 """The config object's associated file, as noted above."""
64 self.parser = ConfigParser.RawConfigParser(allow_no_value=True)
65 self.parser.read(self.filename)
66 self.define_section('core', sopel.config.core_section.CoreSection,
67 validate=validate)
68 self.get = self.parser.get
69
70 @property
71 def homedir(self):
72 """An alias to config.core.homedir"""
73 # Technically it's the other way around, so we can bootstrap filename
74 # attributes in the core section, but whatever.
75 configured = None
76 if self.parser.has_option('core', 'homedir'):
77 configured = self.parser.get('core', 'homedir')
78 if configured:
79 return configured
80 else:
81 return os.path.dirname(self.filename)
82
83 def save(self):
84 """Save all changes to the config file."""
85 cfgfile = open(self.filename, 'w')
86 self.parser.write(cfgfile)
87 cfgfile.flush()
88 cfgfile.close()
89
90 def add_section(self, name):
91 """Add a section to the config file.
92
93 Returns ``False`` if already exists.
94 """
95 try:
96 return self.parser.add_section(name)
97 except ConfigParser.DuplicateSectionError:
98 return False
99
100 def define_section(self, name, cls_, validate=True):
101 """Define the available settings in a section.
102
103 ``cls_`` must be a subclass of ``StaticSection``. If the section has
104 already been defined with a different class, ValueError is raised.
105
106 If ``validate`` is True, the section's values will be validated, and an
107 exception raised if they are invalid. This is desirable in a module's
108 setup function, for example, but might not be in the configure function.
109 """
110 if not issubclass(cls_, StaticSection):
111 raise ValueError("Class must be a subclass of StaticSection.")
112 current = getattr(self, name, None)
113 current_name = str(current.__class__)
114 new_name = str(cls_)
115 if (current is not None and not isinstance(current, self.ConfigSection) and
116 not current_name == new_name):
117 raise ValueError(
118 "Can not re-define class for section from {} to {}.".format(
119 current_name, new_name)
120 )
121 setattr(self, name, cls_(self, name, validate=validate))
122
123 class ConfigSection(object):
124
125 """Represents a section of the config file.
126
127 Contains all keys in the section as attributes.
128
129 """
130
131 def __init__(self, name, items, parent):
132 object.__setattr__(self, '_name', name)
133 object.__setattr__(self, '_parent', parent)
134 for item in items:
135 value = item[1].strip()
136 if not value.lower() == 'none':
137 if value.lower() == 'false':
138 value = False
139 object.__setattr__(self, item[0], value)
140
141 def __getattr__(self, name):
142 return None
143
144 def __setattr__(self, name, value):
145 object.__setattr__(self, name, value)
146 if type(value) is list:
147 value = ','.join(value)
148 self._parent.parser.set(self._name, name, value)
149
150 def get_list(self, name):
151 value = getattr(self, name)
152 if not value:
153 return []
154 if isinstance(value, basestring):
155 value = value.split(',')
156 # Keep the split value, so we don't have to keep doing this
157 setattr(self, name, value)
158 return value
159
160 def __getattr__(self, name):
161 if name in self.parser.sections():
162 items = self.parser.items(name)
163 section = self.ConfigSection(name, items, self) # Return a section
164 setattr(self, name, section)
165 return section
166 else:
167 raise AttributeError("%r object has no attribute %r"
168 % (type(self).__name__, name))
169
170 def option(self, question, default=False):
171 """Ask "y/n" and return the corresponding boolean answer.
172
173 Show user in terminal a "y/n" prompt, and return true or false based on
174 the response. If default is passed as true, the default will be shown
175 as ``[y]``, else it will be ``[n]``. ``question`` should be phrased as
176 a question, but without a question mark at the end.
177
178 """
179 d = 'n'
180 if default:
181 d = 'y'
182 ans = get_input(question + ' (y/n)? [' + d + '] ')
183 if not ans:
184 ans = d
185 return ans.lower() == 'y'
186
187 def _modules(self):
188 home = os.getcwd()
189 modules_dir = os.path.join(home, 'modules')
190 filenames = sopel.loader.enumerate_modules(self)
191 os.sys.path.insert(0, modules_dir)
192 for name, mod_spec in iteritems(filenames):
193 path, type_ = mod_spec
194 try:
195 module, _ = sopel.loader.load_module(name, path, type_)
196 except Exception as e:
197 filename, lineno = sopel.tools.get_raising_file_and_line()
198 rel_path = os.path.relpath(filename, os.path.dirname(__file__))
199 raising_stmt = "%s:%d" % (rel_path, lineno)
200 stderr("Error loading %s: %s (%s)" % (name, e, raising_stmt))
201 else:
202 if hasattr(module, 'configure'):
203 prompt = name + ' module'
204 if module.__doc__:
205 doc = module.__doc__.split('\n', 1)[0]
206 if doc:
207 prompt = doc
208 prompt = 'Configure {} (y/n)? [n]'.format(prompt)
209 do_configure = get_input(prompt)
210 do_configure = do_configure and do_configure.lower() == 'y'
211 if do_configure:
212 module.configure(self)
213 self.save()
214
215
216 def _wizard(section, config=None):
217 dotdir = os.path.expanduser('~/.sopel')
218 configpath = os.path.join(dotdir, ((config or 'default.cfg') + ('.cfg' if config and not config.endswith('.cfg') else '')))
219 if section == 'all':
220 _create_config(configpath)
221 elif section == 'mod':
222 _check_dir(False)
223 if not os.path.isfile(configpath):
224 print("No config file found." +
225 " Please make one before configuring these options.")
226 sys.exit(1)
227 config = Config(configpath, validate=False)
228 config._modules()
229
230
231 def _check_dir(create=True):
232 dotdir = os.path.join(os.path.expanduser('~'), '.sopel')
233 if not os.path.isdir(dotdir):
234 if create:
235 print('Creating a config directory at ~/.sopel...')
236 try:
237 os.makedirs(dotdir)
238 except Exception as e:
239 print('There was a problem creating %s:' % dotdir, file=sys.stderr)
240 print('%s, %s' % (e.__class__, str(e)), file=sys.stderr)
241 print('Please fix this and then run Sopel again.', file=sys.stderr)
242 sys.exit(1)
243 else:
244 print("No config file found. Please make one before configuring these options.")
245 sys.exit(1)
246
247
248 def _create_config(configpath):
249 _check_dir()
250 print("Please answer the following questions" +
251 " to create your configuration file:\n")
252 try:
253 config = Config(configpath, validate=False)
254 sopel.config.core_section.configure(config)
255 if config.option(
256 'Would you like to see if there are any modules'
257 ' that need configuring'
258 ):
259 config._modules()
260 config.save()
261 except Exception: # TODO: Be specific
262 print("Encountered an error while writing the config file." +
263 " This shouldn't happen. Check permissions.")
264 raise
265
266 print("Config file written successfully!")
267 return config.filename
268
[end of sopel/config/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sopel/config/__init__.py b/sopel/config/__init__.py
--- a/sopel/config/__init__.py
+++ b/sopel/config/__init__.py
@@ -214,12 +214,12 @@
def _wizard(section, config=None):
- dotdir = os.path.expanduser('~/.sopel')
+ dotdir = os.path.dirname(config) if config is not None else DEFAULT_HOMEDIR
configpath = os.path.join(dotdir, ((config or 'default.cfg') + ('.cfg' if config and not config.endswith('.cfg') else '')))
if section == 'all':
_create_config(configpath)
elif section == 'mod':
- _check_dir(False)
+ _check_dir(dotdir, False)
if not os.path.isfile(configpath):
print("No config file found." +
" Please make one before configuring these options.")
@@ -228,15 +228,14 @@
config._modules()
-def _check_dir(create=True):
- dotdir = os.path.join(os.path.expanduser('~'), '.sopel')
- if not os.path.isdir(dotdir):
+def _check_dir(path=DEFAULT_HOMEDIR, create=True):
+ if not os.path.isdir(path):
if create:
- print('Creating a config directory at ~/.sopel...')
+ print('Creating a config directory at {}...'.format(path))
try:
- os.makedirs(dotdir)
+ os.makedirs(path)
except Exception as e:
- print('There was a problem creating %s:' % dotdir, file=sys.stderr)
+ print('There was a problem creating %s:' % path, file=sys.stderr)
print('%s, %s' % (e.__class__, str(e)), file=sys.stderr)
print('Please fix this and then run Sopel again.', file=sys.stderr)
sys.exit(1)
@@ -246,7 +245,7 @@
def _create_config(configpath):
- _check_dir()
+ _check_dir(os.path.dirname(configpath))
print("Please answer the following questions" +
" to create your configuration file:\n")
try:
|
{"golden_diff": "diff --git a/sopel/config/__init__.py b/sopel/config/__init__.py\n--- a/sopel/config/__init__.py\n+++ b/sopel/config/__init__.py\n@@ -214,12 +214,12 @@\n \n \n def _wizard(section, config=None):\n- dotdir = os.path.expanduser('~/.sopel')\n+ dotdir = os.path.dirname(config) if config is not None else DEFAULT_HOMEDIR\n configpath = os.path.join(dotdir, ((config or 'default.cfg') + ('.cfg' if config and not config.endswith('.cfg') else '')))\n if section == 'all':\n _create_config(configpath)\n elif section == 'mod':\n- _check_dir(False)\n+ _check_dir(dotdir, False)\n if not os.path.isfile(configpath):\n print(\"No config file found.\" +\n \" Please make one before configuring these options.\")\n@@ -228,15 +228,14 @@\n config._modules()\n \n \n-def _check_dir(create=True):\n- dotdir = os.path.join(os.path.expanduser('~'), '.sopel')\n- if not os.path.isdir(dotdir):\n+def _check_dir(path=DEFAULT_HOMEDIR, create=True):\n+ if not os.path.isdir(path):\n if create:\n- print('Creating a config directory at ~/.sopel...')\n+ print('Creating a config directory at {}...'.format(path))\n try:\n- os.makedirs(dotdir)\n+ os.makedirs(path)\n except Exception as e:\n- print('There was a problem creating %s:' % dotdir, file=sys.stderr)\n+ print('There was a problem creating %s:' % path, file=sys.stderr)\n print('%s, %s' % (e.__class__, str(e)), file=sys.stderr)\n print('Please fix this and then run Sopel again.', file=sys.stderr)\n sys.exit(1)\n@@ -246,7 +245,7 @@\n \n \n def _create_config(configpath):\n- _check_dir()\n+ _check_dir(os.path.dirname(configpath))\n print(\"Please answer the following questions\" +\n \" to create your configuration file:\\n\")\n try:\n", "issue": "Option to specify alternate config directory on first run\nWhen the bot is first run, it executes ```sopel/run_script.py``` at startup.\r\nSaid script is hardcoded to create ```~/.sopel``` if it doesn't already exist.\r\nShould there be an option to allow the user to specify an alternate directory to use?\r\n\r\nThis was observed on d9cfb41 running ```python sopel.py``` without prior configuration.\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nThe config object provides a simplified to access Sopel's configuration file.\nThe sections of the file are attributes of the object, and the keys in the\nsection are attributes of that. So, for example, the ``eggs`` attribute in the\n``[spam]`` section can be accessed from ``config.spam.eggs``.\n\nSection definitions (see \"Section configuration sections\" below) can be added\nto the config object with ``define_section``. When this is done, only the\ndefined keys will be available. A section can not be given more than one\ndefinition. The ``[core]`` section is defined with ``CoreSection`` when the\nobject is initialized.\n\n.. 
versionadded:: 6.0.0\n\"\"\"\n# Copyright 2012-2015, Elsie Powell, embolalia.com\n# Copyright \u00a9 2012, Elad Alfassa <[email protected]>\n# Licensed under the Eiffel Forum License 2.\n\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nfrom sopel.tools import iteritems, stderr\nimport sopel.tools\nfrom sopel.tools import get_input\nimport sopel.loader\nimport os\nimport sys\nif sys.version_info.major < 3:\n import ConfigParser\nelse:\n basestring = str\n import configparser as ConfigParser\nimport sopel.config.core_section\nfrom sopel.config.types import StaticSection\n\n\nDEFAULT_HOMEDIR = os.path.join(os.path.expanduser('~'), '.sopel')\n\n\nclass ConfigurationError(Exception):\n \"\"\" Exception type for configuration errors \"\"\"\n\n def __init__(self, value):\n self.value = value\n\n def __str__(self):\n return 'ConfigurationError: %s' % self.value\n\n\nclass Config(object):\n def __init__(self, filename, validate=True):\n \"\"\"The bot's configuration.\n\n The given filename will be associated with the configuration, and is\n the file which will be written if write() is called. If load is not\n given or True, the configuration object will load the attributes from\n the file at filename.\n\n A few default values will be set here if they are not defined in the\n config file, or a config file is not loaded. They are documented below.\n \"\"\"\n self.filename = filename\n \"\"\"The config object's associated file, as noted above.\"\"\"\n self.parser = ConfigParser.RawConfigParser(allow_no_value=True)\n self.parser.read(self.filename)\n self.define_section('core', sopel.config.core_section.CoreSection,\n validate=validate)\n self.get = self.parser.get\n\n @property\n def homedir(self):\n \"\"\"An alias to config.core.homedir\"\"\"\n # Technically it's the other way around, so we can bootstrap filename\n # attributes in the core section, but whatever.\n configured = None\n if self.parser.has_option('core', 'homedir'):\n configured = self.parser.get('core', 'homedir')\n if configured:\n return configured\n else:\n return os.path.dirname(self.filename)\n\n def save(self):\n \"\"\"Save all changes to the config file.\"\"\"\n cfgfile = open(self.filename, 'w')\n self.parser.write(cfgfile)\n cfgfile.flush()\n cfgfile.close()\n\n def add_section(self, name):\n \"\"\"Add a section to the config file.\n\n Returns ``False`` if already exists.\n \"\"\"\n try:\n return self.parser.add_section(name)\n except ConfigParser.DuplicateSectionError:\n return False\n\n def define_section(self, name, cls_, validate=True):\n \"\"\"Define the available settings in a section.\n\n ``cls_`` must be a subclass of ``StaticSection``. If the section has\n already been defined with a different class, ValueError is raised.\n\n If ``validate`` is True, the section's values will be validated, and an\n exception raised if they are invalid. 
This is desirable in a module's\n setup function, for example, but might not be in the configure function.\n \"\"\"\n if not issubclass(cls_, StaticSection):\n raise ValueError(\"Class must be a subclass of StaticSection.\")\n current = getattr(self, name, None)\n current_name = str(current.__class__)\n new_name = str(cls_)\n if (current is not None and not isinstance(current, self.ConfigSection) and\n not current_name == new_name):\n raise ValueError(\n \"Can not re-define class for section from {} to {}.\".format(\n current_name, new_name)\n )\n setattr(self, name, cls_(self, name, validate=validate))\n\n class ConfigSection(object):\n\n \"\"\"Represents a section of the config file.\n\n Contains all keys in thesection as attributes.\n\n \"\"\"\n\n def __init__(self, name, items, parent):\n object.__setattr__(self, '_name', name)\n object.__setattr__(self, '_parent', parent)\n for item in items:\n value = item[1].strip()\n if not value.lower() == 'none':\n if value.lower() == 'false':\n value = False\n object.__setattr__(self, item[0], value)\n\n def __getattr__(self, name):\n return None\n\n def __setattr__(self, name, value):\n object.__setattr__(self, name, value)\n if type(value) is list:\n value = ','.join(value)\n self._parent.parser.set(self._name, name, value)\n\n def get_list(self, name):\n value = getattr(self, name)\n if not value:\n return []\n if isinstance(value, basestring):\n value = value.split(',')\n # Keep the split value, so we don't have to keep doing this\n setattr(self, name, value)\n return value\n\n def __getattr__(self, name):\n if name in self.parser.sections():\n items = self.parser.items(name)\n section = self.ConfigSection(name, items, self) # Return a section\n setattr(self, name, section)\n return section\n else:\n raise AttributeError(\"%r object has no attribute %r\"\n % (type(self).__name__, name))\n\n def option(self, question, default=False):\n \"\"\"Ask \"y/n\" and return the corresponding boolean answer.\n\n Show user in terminal a \"y/n\" prompt, and return true or false based on\n the response. If default is passed as true, the default will be shown\n as ``[y]``, else it will be ``[n]``. ``question`` should be phrased as\n a question, but without a question mark at the end.\n\n \"\"\"\n d = 'n'\n if default:\n d = 'y'\n ans = get_input(question + ' (y/n)? [' + d + '] ')\n if not ans:\n ans = d\n return ans.lower() == 'y'\n\n def _modules(self):\n home = os.getcwd()\n modules_dir = os.path.join(home, 'modules')\n filenames = sopel.loader.enumerate_modules(self)\n os.sys.path.insert(0, modules_dir)\n for name, mod_spec in iteritems(filenames):\n path, type_ = mod_spec\n try:\n module, _ = sopel.loader.load_module(name, path, type_)\n except Exception as e:\n filename, lineno = sopel.tools.get_raising_file_and_line()\n rel_path = os.path.relpath(filename, os.path.dirname(__file__))\n raising_stmt = \"%s:%d\" % (rel_path, lineno)\n stderr(\"Error loading %s: %s (%s)\" % (name, e, raising_stmt))\n else:\n if hasattr(module, 'configure'):\n prompt = name + ' module'\n if module.__doc__:\n doc = module.__doc__.split('\\n', 1)[0]\n if doc:\n prompt = doc\n prompt = 'Configure {} (y/n)? 
[n]'.format(prompt)\n do_configure = get_input(prompt)\n do_configure = do_configure and do_configure.lower() == 'y'\n if do_configure:\n module.configure(self)\n self.save()\n\n\ndef _wizard(section, config=None):\n dotdir = os.path.expanduser('~/.sopel')\n configpath = os.path.join(dotdir, ((config or 'default.cfg') + ('.cfg' if config and not config.endswith('.cfg') else '')))\n if section == 'all':\n _create_config(configpath)\n elif section == 'mod':\n _check_dir(False)\n if not os.path.isfile(configpath):\n print(\"No config file found.\" +\n \" Please make one before configuring these options.\")\n sys.exit(1)\n config = Config(configpath, validate=False)\n config._modules()\n\n\ndef _check_dir(create=True):\n dotdir = os.path.join(os.path.expanduser('~'), '.sopel')\n if not os.path.isdir(dotdir):\n if create:\n print('Creating a config directory at ~/.sopel...')\n try:\n os.makedirs(dotdir)\n except Exception as e:\n print('There was a problem creating %s:' % dotdir, file=sys.stderr)\n print('%s, %s' % (e.__class__, str(e)), file=sys.stderr)\n print('Please fix this and then run Sopel again.', file=sys.stderr)\n sys.exit(1)\n else:\n print(\"No config file found. Please make one before configuring these options.\")\n sys.exit(1)\n\n\ndef _create_config(configpath):\n _check_dir()\n print(\"Please answer the following questions\" +\n \" to create your configuration file:\\n\")\n try:\n config = Config(configpath, validate=False)\n sopel.config.core_section.configure(config)\n if config.option(\n 'Would you like to see if there are any modules'\n ' that need configuring'\n ):\n config._modules()\n config.save()\n except Exception: # TODO: Be specific\n print(\"Encountered an error while writing the config file.\" +\n \" This shouldn't happen. Check permissions.\")\n raise\n\n print(\"Config file written successfully!\")\n return config.filename\n", "path": "sopel/config/__init__.py"}]}
| 3,535 | 490 |
gh_patches_debug_40285
|
rasdani/github-patches
|
git_diff
|
optuna__optuna-3115
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[RFC] Show progress bar based on `timeout`
<!-- Please write a clear and concise description of the feature proposal. -->
## Motivation
The current [`study.optimize`](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.Study.html#optuna.study.Study.optimize) shows the progress bar when we specify the number of trials. We can show the progress bar based on the `timeout` value as well.
Note that before implementing this feature, we need to decide the priority of `n_trials` and `timeout`. When we specify both values as not `None` values, there are two ways to show the progress bar.
Moreover, we need to discuss the case when both `None`. I suppose not showing the progress bar makes sense when both are `None`.
## Description
<!-- Please write a detailed description of the new feature. -->
- Show progress bar using `timeout` value.
- Add test by following https://github.com/optuna/optuna/pull/2900
## Alternatives (optional)
<!-- Please write a clear and concise description of any alternative solutions or features you've considered. -->
## Additional context (optional)
We might need to support the `n_jobs!=1` case depending on https://github.com/optuna/optuna/issues/2957.
<!-- Please add any other context or screenshots about the feature request here. -->
</issue>
<code>
[start of optuna/progress_bar.py]
1 import logging
2 from typing import Any
3 from typing import Optional
4
5 from tqdm.auto import tqdm
6
7 from optuna import logging as optuna_logging
8 from optuna._experimental import experimental
9
10
11 _tqdm_handler: Optional["_TqdmLoggingHandler"] = None
12
13
14 # Reference: https://gist.github.com/hvy/8b80c2cedf02b15c24f85d1fa17ebe02
15 class _TqdmLoggingHandler(logging.StreamHandler):
16 def emit(self, record: Any) -> None:
17 try:
18 msg = self.format(record)
19 tqdm.write(msg)
20 self.flush()
21 except (KeyboardInterrupt, SystemExit):
22 raise
23 except Exception:
24 self.handleError(record)
25
26
27 class _ProgressBar(object):
28 """Progress Bar implementation for :func:`~optuna.study.Study.optimize` on the top of `tqdm`.
29
30 Args:
31 is_valid:
32 Whether to show progress bars in :func:`~optuna.study.Study.optimize`.
33 n_trials:
34 The number of trials.
35 timeout:
36 Stop study after the given number of second(s).
37 """
38
39 def __init__(
40 self, is_valid: bool, n_trials: Optional[int] = None, timeout: Optional[float] = None
41 ) -> None:
42 self._is_valid = is_valid
43 self._n_trials = n_trials
44 self._timeout = timeout
45
46 if self._is_valid:
47 self._init_valid()
48
49 # TODO(hvy): Remove initialization indirection via this method when the progress bar is no
50 # longer experimental.
51 @experimental("1.2.0", name="Progress bar")
52 def _init_valid(self) -> None:
53 self._progress_bar = tqdm(range(self._n_trials) if self._n_trials is not None else None)
54 global _tqdm_handler
55
56 _tqdm_handler = _TqdmLoggingHandler()
57 _tqdm_handler.setLevel(logging.INFO)
58 _tqdm_handler.setFormatter(optuna_logging.create_default_formatter())
59 optuna_logging.disable_default_handler()
60 optuna_logging._get_library_root_logger().addHandler(_tqdm_handler)
61
62 def update(self, elapsed_seconds: Optional[float]) -> None:
63 """Update the progress bars if ``is_valid`` is :obj:`True`.
64
65 Args:
66 elapsed_seconds:
67 The time past since :func:`~optuna.study.Study.optimize` started.
68 """
69 if self._is_valid:
70 self._progress_bar.update(1)
71 if self._timeout is not None and elapsed_seconds is not None:
72 self._progress_bar.set_postfix_str(
73 "{:.02f}/{} seconds".format(elapsed_seconds, self._timeout)
74 )
75
76 def close(self) -> None:
77 """Close progress bars."""
78 if self._is_valid:
79 self._progress_bar.close()
80 assert _tqdm_handler is not None
81 optuna_logging._get_library_root_logger().removeHandler(_tqdm_handler)
82 optuna_logging.enable_default_handler()
83
[end of optuna/progress_bar.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/optuna/progress_bar.py b/optuna/progress_bar.py
--- a/optuna/progress_bar.py
+++ b/optuna/progress_bar.py
@@ -39,9 +39,11 @@
def __init__(
self, is_valid: bool, n_trials: Optional[int] = None, timeout: Optional[float] = None
) -> None:
- self._is_valid = is_valid
+
+ self._is_valid = is_valid and (n_trials or timeout) is not None
self._n_trials = n_trials
self._timeout = timeout
+ self._last_elapsed_seconds = 0.0
if self._is_valid:
self._init_valid()
@@ -50,7 +52,21 @@
# longer experimental.
@experimental("1.2.0", name="Progress bar")
def _init_valid(self) -> None:
- self._progress_bar = tqdm(range(self._n_trials) if self._n_trials is not None else None)
+
+ if self._n_trials is not None:
+ self._progress_bar = tqdm(total=self._n_trials)
+
+ else:
+ fmt = "{percentage:3.0f}%|{bar}| {elapsed}/{desc}"
+ self._progress_bar = tqdm(total=self._timeout, bar_format=fmt)
+
+ # Using description string instead postfix string
+ # to display formatted timeout, since postfix carries
+ # extra comma space auto-format.
+ # https://github.com/tqdm/tqdm/issues/712
+ total = tqdm.format_interval(self._timeout)
+ self._progress_bar.set_description_str(total)
+
global _tqdm_handler
_tqdm_handler = _TqdmLoggingHandler()
@@ -59,22 +75,37 @@
optuna_logging.disable_default_handler()
optuna_logging._get_library_root_logger().addHandler(_tqdm_handler)
- def update(self, elapsed_seconds: Optional[float]) -> None:
+ def update(self, elapsed_seconds: float) -> None:
"""Update the progress bars if ``is_valid`` is :obj:`True`.
Args:
elapsed_seconds:
The time past since :func:`~optuna.study.Study.optimize` started.
"""
+
if self._is_valid:
- self._progress_bar.update(1)
- if self._timeout is not None and elapsed_seconds is not None:
- self._progress_bar.set_postfix_str(
- "{:.02f}/{} seconds".format(elapsed_seconds, self._timeout)
- )
+ if self._n_trials is not None:
+ self._progress_bar.update(1)
+ if self._timeout is not None:
+ self._progress_bar.set_postfix_str(
+ "{:.02f}/{} seconds".format(elapsed_seconds, self._timeout)
+ )
+
+ elif self._timeout is not None:
+ time_diff = elapsed_seconds - self._last_elapsed_seconds
+ if elapsed_seconds > self._timeout:
+ # Clip elapsed time to avoid tqdm warnings.
+ time_diff -= elapsed_seconds - self._timeout
+
+ self._progress_bar.update(time_diff)
+ self._last_elapsed_seconds = elapsed_seconds
+
+ else:
+ assert False
def close(self) -> None:
"""Close progress bars."""
+
if self._is_valid:
self._progress_bar.close()
assert _tqdm_handler is not None
|
{"golden_diff": "diff --git a/optuna/progress_bar.py b/optuna/progress_bar.py\n--- a/optuna/progress_bar.py\n+++ b/optuna/progress_bar.py\n@@ -39,9 +39,11 @@\n def __init__(\n self, is_valid: bool, n_trials: Optional[int] = None, timeout: Optional[float] = None\n ) -> None:\n- self._is_valid = is_valid\n+\n+ self._is_valid = is_valid and (n_trials or timeout) is not None\n self._n_trials = n_trials\n self._timeout = timeout\n+ self._last_elapsed_seconds = 0.0\n \n if self._is_valid:\n self._init_valid()\n@@ -50,7 +52,21 @@\n # longer experimental.\n @experimental(\"1.2.0\", name=\"Progress bar\")\n def _init_valid(self) -> None:\n- self._progress_bar = tqdm(range(self._n_trials) if self._n_trials is not None else None)\n+\n+ if self._n_trials is not None:\n+ self._progress_bar = tqdm(total=self._n_trials)\n+\n+ else:\n+ fmt = \"{percentage:3.0f}%|{bar}| {elapsed}/{desc}\"\n+ self._progress_bar = tqdm(total=self._timeout, bar_format=fmt)\n+\n+ # Using description string instead postfix string\n+ # to display formatted timeout, since postfix carries\n+ # extra comma space auto-format.\n+ # https://github.com/tqdm/tqdm/issues/712\n+ total = tqdm.format_interval(self._timeout)\n+ self._progress_bar.set_description_str(total)\n+\n global _tqdm_handler\n \n _tqdm_handler = _TqdmLoggingHandler()\n@@ -59,22 +75,37 @@\n optuna_logging.disable_default_handler()\n optuna_logging._get_library_root_logger().addHandler(_tqdm_handler)\n \n- def update(self, elapsed_seconds: Optional[float]) -> None:\n+ def update(self, elapsed_seconds: float) -> None:\n \"\"\"Update the progress bars if ``is_valid`` is :obj:`True`.\n \n Args:\n elapsed_seconds:\n The time past since :func:`~optuna.study.Study.optimize` started.\n \"\"\"\n+\n if self._is_valid:\n- self._progress_bar.update(1)\n- if self._timeout is not None and elapsed_seconds is not None:\n- self._progress_bar.set_postfix_str(\n- \"{:.02f}/{} seconds\".format(elapsed_seconds, self._timeout)\n- )\n+ if self._n_trials is not None:\n+ self._progress_bar.update(1)\n+ if self._timeout is not None:\n+ self._progress_bar.set_postfix_str(\n+ \"{:.02f}/{} seconds\".format(elapsed_seconds, self._timeout)\n+ )\n+\n+ elif self._timeout is not None:\n+ time_diff = elapsed_seconds - self._last_elapsed_seconds\n+ if elapsed_seconds > self._timeout:\n+ # Clip elapsed time to avoid tqdm warnings.\n+ time_diff -= elapsed_seconds - self._timeout\n+\n+ self._progress_bar.update(time_diff)\n+ self._last_elapsed_seconds = elapsed_seconds\n+\n+ else:\n+ assert False\n \n def close(self) -> None:\n \"\"\"Close progress bars.\"\"\"\n+\n if self._is_valid:\n self._progress_bar.close()\n assert _tqdm_handler is not None\n", "issue": "[RFC] Show progress bar based on `timeout`\n<!-- Please write a clear and concise description of the feature proposal. -->\r\n\r\n## Motivation\r\n\r\nThe current [`study.optimize`](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.Study.html#optuna.study.Study.optimize) shows the progress bar when we specify the number of trials. We can show the progress bar based on the `timeout` value as well.\r\n\r\nNote that before implementing this feature, we need to decide the priority of `n_trials` and `timeout`. When we specify both values as not `None` values, there are two ways to show the progress bar. \r\n\r\nMoreover, we need to discuss the case when both `None`. I suppose not showing the progress bar makes sense when both are `None`.\r\n\r\n## Description\r\n\r\n<!-- Please write a detailed description of the new feature. 
-->\r\n\r\n\r\n- Show progress bar using `timeout` value.\r\n- Add test by following https://github.com/optuna/optuna/pull/2900\r\n\r\n## Alternatives (optional)\r\n\r\n\r\n<!-- Please write a clear and concise description of any alternative solutions or features you've considered. -->\r\n\r\n## Additional context (optional)\r\n\r\nWe might need to support the `n_jobs!=1` case depending on https://github.com/optuna/optuna/issues/2957.\r\n\r\n<!-- Please add any other context or screenshots about the feature request here. -->\r\n\n", "before_files": [{"content": "import logging\nfrom typing import Any\nfrom typing import Optional\n\nfrom tqdm.auto import tqdm\n\nfrom optuna import logging as optuna_logging\nfrom optuna._experimental import experimental\n\n\n_tqdm_handler: Optional[\"_TqdmLoggingHandler\"] = None\n\n\n# Reference: https://gist.github.com/hvy/8b80c2cedf02b15c24f85d1fa17ebe02\nclass _TqdmLoggingHandler(logging.StreamHandler):\n def emit(self, record: Any) -> None:\n try:\n msg = self.format(record)\n tqdm.write(msg)\n self.flush()\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception:\n self.handleError(record)\n\n\nclass _ProgressBar(object):\n \"\"\"Progress Bar implementation for :func:`~optuna.study.Study.optimize` on the top of `tqdm`.\n\n Args:\n is_valid:\n Whether to show progress bars in :func:`~optuna.study.Study.optimize`.\n n_trials:\n The number of trials.\n timeout:\n Stop study after the given number of second(s).\n \"\"\"\n\n def __init__(\n self, is_valid: bool, n_trials: Optional[int] = None, timeout: Optional[float] = None\n ) -> None:\n self._is_valid = is_valid\n self._n_trials = n_trials\n self._timeout = timeout\n\n if self._is_valid:\n self._init_valid()\n\n # TODO(hvy): Remove initialization indirection via this method when the progress bar is no\n # longer experimental.\n @experimental(\"1.2.0\", name=\"Progress bar\")\n def _init_valid(self) -> None:\n self._progress_bar = tqdm(range(self._n_trials) if self._n_trials is not None else None)\n global _tqdm_handler\n\n _tqdm_handler = _TqdmLoggingHandler()\n _tqdm_handler.setLevel(logging.INFO)\n _tqdm_handler.setFormatter(optuna_logging.create_default_formatter())\n optuna_logging.disable_default_handler()\n optuna_logging._get_library_root_logger().addHandler(_tqdm_handler)\n\n def update(self, elapsed_seconds: Optional[float]) -> None:\n \"\"\"Update the progress bars if ``is_valid`` is :obj:`True`.\n\n Args:\n elapsed_seconds:\n The time past since :func:`~optuna.study.Study.optimize` started.\n \"\"\"\n if self._is_valid:\n self._progress_bar.update(1)\n if self._timeout is not None and elapsed_seconds is not None:\n self._progress_bar.set_postfix_str(\n \"{:.02f}/{} seconds\".format(elapsed_seconds, self._timeout)\n )\n\n def close(self) -> None:\n \"\"\"Close progress bars.\"\"\"\n if self._is_valid:\n self._progress_bar.close()\n assert _tqdm_handler is not None\n optuna_logging._get_library_root_logger().removeHandler(_tqdm_handler)\n optuna_logging.enable_default_handler()\n", "path": "optuna/progress_bar.py"}]}
| 1,640 | 768 |
gh_patches_debug_27844
|
rasdani/github-patches
|
git_diff
|
Zeroto521__my-data-toolkit-713
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ENH: New geoaccessor for GeoSeries to return tuple of coordinates `(x, y)`
<!--
Thanks for contributing a pull request!
Please follow these standard acronyms to start the commit message:
- ENH: enhancement
- BUG: bug fix
- DOC: documentation
- TYP: type annotations
- TST: addition or modification of tests
- MAINT: maintenance commit (refactoring, typos, etc.)
- BLD: change related to building
- REL: related to releasing
- API: an (incompatible) API change
- DEP: deprecate something, or remove a deprecated object
- DEV: development tool or utility
- REV: revert an earlier commit
- PERF: performance improvement
- BOT: always commit via a bot
- CI: related to CI or CD
- CLN: Code cleanup
-->
- [ ] closes #xxxx
- [x] whatsnew entry
as title
```python
>>> import dtoolkit.geoaccessor
>>> import geopandas as gpd
>>> from shapely.geometry import Point
 >>> s = gpd.GeoSeries([Point(1, 1), Point(2, 2), Point(3, 3)])
>>> s.xy
0 (1.0, 1.0)
1 (2.0, 2.0)
2 (3.0, 3.0)
dtype: object
```
</issue>
<code>
[start of dtoolkit/geoaccessor/geoseries/xy.py]
1 import geopandas as gpd
2 import pandas as pd
3
4 from dtoolkit.geoaccessor.register import register_geoseries_method
5
6
7 @register_geoseries_method
8 def xy(s: gpd.GeoSeries, /) -> pd.Series:
9 """
10 Return the x and y location of Point geometries in a GeoSeries.
11
12 Returns
13 -------
14 Series
15 tuple of x and y coordinates.
16
17 See Also
18 --------
19 geopandas.GeoSeries.x
20 geopandas.GeoSeries.y
21
22 Examples
23 --------
24 >>> import dtoolkit.geoaccessor
25 >>> import geopandas as gpd
26 >>> from shapely.geometry import Point
27 >>> s = gpd.GeoSeries([Point(1, 1), Point(2, 2), Point(3, 3)])
28 >>> s
29 0 POINT (1.00000 1.00000)
30 1 POINT (2.00000 2.00000)
31 2 POINT (3.00000 3.00000)
32 dtype: geometry
33 >>> s.xy()
34 0 (1.0, 1.0)
35 1 (2.0, 2.0)
36 2 (3.0, 3.0)
37 dtype: object
38 """
39
40 return pd.concat((s.x, s.y), axis=1).apply(tuple, axis=1)
41
[end of dtoolkit/geoaccessor/geoseries/xy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/dtoolkit/geoaccessor/geoseries/xy.py b/dtoolkit/geoaccessor/geoseries/xy.py
--- a/dtoolkit/geoaccessor/geoseries/xy.py
+++ b/dtoolkit/geoaccessor/geoseries/xy.py
@@ -5,14 +5,19 @@
@register_geoseries_method
-def xy(s: gpd.GeoSeries, /) -> pd.Series:
+def xy(s: gpd.GeoSeries, /, reverse: bool = False) -> pd.Series:
"""
Return the x and y location of Point geometries in a GeoSeries.
+ Parameters
+ ----------
+ reverse : bool, default False
+ If True, return (y, x) instead.
+
Returns
-------
Series
- tuple of x and y coordinates.
+ tuple of coordinate.
See Also
--------
@@ -24,17 +29,26 @@
>>> import dtoolkit.geoaccessor
>>> import geopandas as gpd
>>> from shapely.geometry import Point
- >>> s = gpd.GeoSeries([Point(1, 1), Point(2, 2), Point(3, 3)])
+ >>> s = gpd.GeoSeries([Point(0, 1), Point(0, 2), Point(0, 3)])
>>> s
- 0 POINT (1.00000 1.00000)
- 1 POINT (2.00000 2.00000)
- 2 POINT (3.00000 3.00000)
+ 0 POINT (0.00000 1.00000)
+ 1 POINT (0.00000 2.00000)
+ 2 POINT (0.00000 3.00000)
dtype: geometry
>>> s.xy()
- 0 (1.0, 1.0)
- 1 (2.0, 2.0)
- 2 (3.0, 3.0)
+ 0 (0.0, 1.0)
+ 1 (0.0, 2.0)
+ 2 (0.0, 3.0)
+ dtype: object
+
+ Set ``reverse=True`` to return (y, x).
+
+ >>> s.xy(True)
+ 0 (1.0, 0.0)
+ 1 (2.0, 0.0)
+ 2 (3.0, 0.0)
dtype: object
"""
- return pd.concat((s.x, s.y), axis=1).apply(tuple, axis=1)
+ coordinates = (s.y, s.x) if reverse else (s.x, s.y)
+ return pd.concat(coordinates, axis=1).apply(tuple, axis=1)
|
{"golden_diff": "diff --git a/dtoolkit/geoaccessor/geoseries/xy.py b/dtoolkit/geoaccessor/geoseries/xy.py\n--- a/dtoolkit/geoaccessor/geoseries/xy.py\n+++ b/dtoolkit/geoaccessor/geoseries/xy.py\n@@ -5,14 +5,19 @@\n \n \n @register_geoseries_method\n-def xy(s: gpd.GeoSeries, /) -> pd.Series:\n+def xy(s: gpd.GeoSeries, /, reverse: bool = False) -> pd.Series:\n \"\"\"\n Return the x and y location of Point geometries in a GeoSeries.\n \n+ Parameters\n+ ----------\n+ reverse : bool, default False\n+ If True, return (y, x) instead.\n+\n Returns\n -------\n Series\n- tuple of x and y coordinates.\n+ tuple of coordinate.\n \n See Also\n --------\n@@ -24,17 +29,26 @@\n >>> import dtoolkit.geoaccessor\n >>> import geopandas as gpd\n >>> from shapely.geometry import Point\n- >>> s = gpd.GeoSeries([Point(1, 1), Point(2, 2), Point(3, 3)])\n+ >>> s = gpd.GeoSeries([Point(0, 1), Point(0, 2), Point(0, 3)])\n >>> s\n- 0 POINT (1.00000 1.00000)\n- 1 POINT (2.00000 2.00000)\n- 2 POINT (3.00000 3.00000)\n+ 0 POINT (0.00000 1.00000)\n+ 1 POINT (0.00000 2.00000)\n+ 2 POINT (0.00000 3.00000)\n dtype: geometry\n >>> s.xy()\n- 0 (1.0, 1.0)\n- 1 (2.0, 2.0)\n- 2 (3.0, 3.0)\n+ 0 (0.0, 1.0)\n+ 1 (0.0, 2.0)\n+ 2 (0.0, 3.0)\n+ dtype: object\n+\n+ Set ``reverse=True`` to return (y, x).\n+\n+ >>> s.xy(True)\n+ 0 (1.0, 0.0)\n+ 1 (2.0, 0.0)\n+ 2 (3.0, 0.0)\n dtype: object\n \"\"\"\n \n- return pd.concat((s.x, s.y), axis=1).apply(tuple, axis=1)\n+ coordinates = (s.y, s.x) if reverse else (s.x, s.y)\n+ return pd.concat(coordinates, axis=1).apply(tuple, axis=1)\n", "issue": "ENH: New geoaccessor for GeoSeries to return tuple of coordinates `(x, y)`\n<!--\r\nThanks for contributing a pull request!\r\n\r\nPlease follow these standard acronyms to start the commit message:\r\n\r\n- ENH: enhancement\r\n- BUG: bug fix\r\n- DOC: documentation\r\n- TYP: type annotations\r\n- TST: addition or modification of tests\r\n- MAINT: maintenance commit (refactoring, typos, etc.)\r\n- BLD: change related to building\r\n- REL: related to releasing\r\n- API: an (incompatible) API change\r\n- DEP: deprecate something, or remove a deprecated object\r\n- DEV: development tool or utility\r\n- REV: revert an earlier commit\r\n- PERF: performance improvement\r\n- BOT: always commit via a bot\r\n- CI: related to CI or CD\r\n- CLN: Code cleanup\r\n-->\r\n\r\n- [ ] closes #xxxx\r\n- [x] whatsnew entry\r\n\r\nas title\r\n\r\n```python\r\n >>> import dtoolkit.geoaccessor\r\n >>> import geopandas as gpd\r\n >>> from shapely.geometry import Point\r\n >>> s = geopandas.GeoSeries([Point(1, 1), Point(2, 2), Point(3, 3)])\r\n >>> s.xy\r\n 0 (1.0, 1.0)\r\n 1 (2.0, 2.0)\r\n 2 (3.0, 3.0)\r\n dtype: object\r\n```\n", "before_files": [{"content": "import geopandas as gpd\nimport pandas as pd\n\nfrom dtoolkit.geoaccessor.register import register_geoseries_method\n\n\n@register_geoseries_method\ndef xy(s: gpd.GeoSeries, /) -> pd.Series:\n \"\"\"\n Return the x and y location of Point geometries in a GeoSeries.\n\n Returns\n -------\n Series\n tuple of x and y coordinates.\n\n See Also\n --------\n geopandas.GeoSeries.x\n geopandas.GeoSeries.y\n\n Examples\n --------\n >>> import dtoolkit.geoaccessor\n >>> import geopandas as gpd\n >>> from shapely.geometry import Point\n >>> s = gpd.GeoSeries([Point(1, 1), Point(2, 2), Point(3, 3)])\n >>> s\n 0 POINT (1.00000 1.00000)\n 1 POINT (2.00000 2.00000)\n 2 POINT (3.00000 3.00000)\n dtype: geometry\n >>> s.xy()\n 0 (1.0, 1.0)\n 1 (2.0, 2.0)\n 2 (3.0, 3.0)\n dtype: object\n \"\"\"\n\n return pd.concat((s.x, s.y), 
axis=1).apply(tuple, axis=1)\n", "path": "dtoolkit/geoaccessor/geoseries/xy.py"}]}
| 1,279 | 696 |
gh_patches_debug_26503
|
rasdani/github-patches
|
git_diff
|
sanic-org__sanic-2837
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cookie totally breaks if the client sets a bare cookie
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
A cookie may not be in the `key=value` format. For example, if the JS code runs `document.cookie = "bad"`, it becomes:
*(screenshot omitted)*
I don't know what to call it; I will use the term "bare cookie" in this report. In the following requests with a bare cookie, the Cookie HTTP header becomes: `Cookie: key=value; bad`
*(screenshot omitted)*
It seems that Sanic cannot parse the header with bare cookies, and will throw all cookies (including the legitimate `key=value` pair) away. See the code snippet below.
### Code snippet
```python
from sanic import Sanic
from sanic.response import html, text
app = Sanic("test")
app.config.AUTO_EXTEND = False
@app.get("/")
async def route1(request):
return html('<script>document.cookie="key=value"; document.cookie="bad"; location.href="/fire";</script>')
@app.get("/fire")
async def route2(request):
return text(f'''
headers = {request.headers.get("Cookie")}
key = {request.cookies.get("key", "none")}
''')
if __name__ == '__main__':
app.run(port=4321, debug=True)
```
Then visit `http://127.0.0.1:4321/` in Chrome. The page shows:
```
headers = key=value; bad
key = none
```
### Expected Behavior
The page should show:
```
headers = key=value; bad
key = value
```
### How do you run Sanic?
As a script (`app.run` or `Sanic.serve`)
### Operating System
Windows
### Sanic Version
22.12.0
### Additional context
I am using the latest stable Chrome (117.0.5938.150) to reproduce this.
</issue>
<code>
[start of sanic/models/protocol_types.py]
1 from __future__ import annotations
2
3 import sys
4
5 from asyncio import BaseTransport
6 from typing import TYPE_CHECKING, Any, AnyStr, Optional
7
8
9 if TYPE_CHECKING:
10 from sanic.http.constants import HTTP
11 from sanic.models.asgi import ASGIScope
12
13
14 if sys.version_info < (3, 8):
15 Range = Any
16 HTMLProtocol = Any
17 else:
18 # Protocol is a 3.8+ feature
19 from typing import Protocol
20
21 class HTMLProtocol(Protocol):
22 def __html__(self) -> AnyStr:
23 ...
24
25 def _repr_html_(self) -> AnyStr:
26 ...
27
28 class Range(Protocol):
29 start: Optional[int]
30 end: Optional[int]
31 size: Optional[int]
32 total: Optional[int]
33 __slots__ = ()
34
35
36 class TransportProtocol(BaseTransport):
37 scope: ASGIScope
38 version: HTTP
39 __slots__ = ()
40
[end of sanic/models/protocol_types.py]
[start of sanic/cookies/request.py]
1 import re
2
3 from typing import Any, Dict, List, Optional
4
5 from sanic.cookies.response import Cookie
6 from sanic.log import deprecation
7 from sanic.request.parameters import RequestParameters
8
9
10 COOKIE_NAME_RESERVED_CHARS = re.compile(
11 '[\x00-\x1F\x7F-\xFF()<>@,;:\\\\"/[\\]?={} \x09]'
12 )
13 OCTAL_PATTERN = re.compile(r"\\[0-3][0-7][0-7]")
14 QUOTE_PATTERN = re.compile(r"[\\].")
15
16
17 def _unquote(str): # no cov
18 if str is None or len(str) < 2:
19 return str
20 if str[0] != '"' or str[-1] != '"':
21 return str
22
23 str = str[1:-1]
24
25 i = 0
26 n = len(str)
27 res = []
28 while 0 <= i < n:
29 o_match = OCTAL_PATTERN.search(str, i)
30 q_match = QUOTE_PATTERN.search(str, i)
31 if not o_match and not q_match:
32 res.append(str[i:])
33 break
34 # else:
35 j = k = -1
36 if o_match:
37 j = o_match.start(0)
38 if q_match:
39 k = q_match.start(0)
40 if q_match and (not o_match or k < j):
41 res.append(str[i:k])
42 res.append(str[k + 1])
43 i = k + 2
44 else:
45 res.append(str[i:j])
46 res.append(chr(int(str[j + 1 : j + 4], 8))) # noqa: E203
47 i = j + 4
48 return "".join(res)
49
50
51 def parse_cookie(raw: str) -> Dict[str, List[str]]:
52 """Parses a raw cookie string into a dictionary.
53
54 The function takes a raw cookie string (usually from HTTP headers) and
55 returns a dictionary where each key is a cookie name and the value is a
56 list of values for that cookie. The function handles quoted values and
57 skips invalid cookie names.
58
59 Args:
60 raw (str): The raw cookie string to be parsed.
61
62 Returns:
63 Dict[str, List[str]]: A dictionary containing the cookie names as keys
64 and a list of values for each cookie.
65
66 Example:
67 ```python
68 raw = 'name1=value1; name2="value2"; name3=value3'
69 cookies = parse_cookie(raw)
70 # cookies will be {'name1': ['value1'], 'name2': ['value2'], 'name3': ['value3']}
71 ```
72 """ # noqa: E501
73 cookies: Dict[str, List[str]] = {}
74
75 for token in raw.split(";"):
76 name, __, value = token.partition("=")
77 name = name.strip()
78 value = value.strip()
79
80 if not name:
81 continue
82
83 if COOKIE_NAME_RESERVED_CHARS.search(name): # no cov
84 continue
85
86 if len(value) > 2 and value[0] == '"' and value[-1] == '"': # no cov
87 value = _unquote(value)
88
89 if name in cookies:
90 cookies[name].append(value)
91 else:
92 cookies[name] = [value]
93
94 return cookies
95
96
97 class CookieRequestParameters(RequestParameters):
98 """A container for accessing single and multiple cookie values.
99
100 Because the HTTP standard allows for multiple cookies with the same name,
101 a standard dictionary cannot be used to access cookie values. This class
102 provides a way to access cookie values in a way that is similar to a
103 dictionary, but also allows for accessing multiple values for a single
104 cookie name when necessary.
105
106 Args:
107 cookies (Dict[str, List[str]]): A dictionary containing the cookie
108 names as keys and a list of values for each cookie.
109
110 Example:
111 ```python
112 raw = 'name1=value1; name2="value2"; name3=value3'
113 cookies = parse_cookie(raw)
114 # cookies will be {'name1': ['value1'], 'name2': ['value2'], 'name3': ['value3']}
115
116 request_cookies = CookieRequestParameters(cookies)
117 request_cookies['name1'] # 'value1'
118 request_cookies.get('name1') # 'value1'
119 request_cookies.getlist('name1') # ['value1']
120 ```
121 """ # noqa: E501
122
123 def __getitem__(self, key: str) -> Optional[str]:
124 deprecation(
125 f"You are accessing cookie key '{key}', which is currently in "
126 "compat mode returning a single cookie value. Starting in v24.3 "
127 "accessing a cookie value like this will return a list of values. "
128 "To avoid this behavior and continue accessing a single value, "
129 f"please upgrade from request.cookies['{key}'] to "
130 f"request.cookies.get('{key}'). See more details: "
131 "https://sanic.dev/en/guide/release-notes/v23.3.html#request-cookies", # noqa
132 24.3,
133 )
134 try:
135 value = self._get_prefixed_cookie(key)
136 except KeyError:
137 value = super().__getitem__(key)
138 return value[0]
139
140 def __getattr__(self, key: str) -> str:
141 if key.startswith("_"):
142 return self.__getattribute__(key)
143 key = key.rstrip("_").replace("_", "-")
144 return str(self.get(key, ""))
145
146 def get(self, name: str, default: Optional[Any] = None) -> Optional[Any]:
147 try:
148 return self._get_prefixed_cookie(name)[0]
149 except KeyError:
150 return super().get(name, default)
151
152 def getlist(
153 self, name: str, default: Optional[Any] = None
154 ) -> Optional[Any]:
155 try:
156 return self._get_prefixed_cookie(name)
157 except KeyError:
158 return super().getlist(name, default)
159
160 def _get_prefixed_cookie(self, name: str) -> Any:
161 getitem = super().__getitem__
162 try:
163 return getitem(f"{Cookie.HOST_PREFIX}{name}")
164 except KeyError:
165 return getitem(f"{Cookie.SECURE_PREFIX}{name}")
166
[end of sanic/cookies/request.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sanic/cookies/request.py b/sanic/cookies/request.py
--- a/sanic/cookies/request.py
+++ b/sanic/cookies/request.py
@@ -73,12 +73,17 @@
cookies: Dict[str, List[str]] = {}
for token in raw.split(";"):
- name, __, value = token.partition("=")
+ name, sep, value = token.partition("=")
name = name.strip()
value = value.strip()
- if not name:
- continue
+ # Support cookies =value or plain value with no name
+ # https://github.com/httpwg/http-extensions/issues/159
+ if not sep:
+ if not name:
+ # Empty value like ;; or a cookie header with no value
+ continue
+ name, value = "", name
if COOKIE_NAME_RESERVED_CHARS.search(name): # no cov
continue
diff --git a/sanic/models/protocol_types.py b/sanic/models/protocol_types.py
--- a/sanic/models/protocol_types.py
+++ b/sanic/models/protocol_types.py
@@ -3,7 +3,7 @@
import sys
from asyncio import BaseTransport
-from typing import TYPE_CHECKING, Any, AnyStr, Optional
+from typing import TYPE_CHECKING, Any, Optional, Union
if TYPE_CHECKING:
@@ -19,10 +19,10 @@
from typing import Protocol
class HTMLProtocol(Protocol):
- def __html__(self) -> AnyStr:
+ def __html__(self) -> Union[str, bytes]:
...
- def _repr_html_(self) -> AnyStr:
+ def _repr_html_(self) -> Union[str, bytes]:
...
class Range(Protocol):
|
{"golden_diff": "diff --git a/sanic/cookies/request.py b/sanic/cookies/request.py\n--- a/sanic/cookies/request.py\n+++ b/sanic/cookies/request.py\n@@ -73,12 +73,17 @@\n cookies: Dict[str, List[str]] = {}\n \n for token in raw.split(\";\"):\n- name, __, value = token.partition(\"=\")\n+ name, sep, value = token.partition(\"=\")\n name = name.strip()\n value = value.strip()\n \n- if not name:\n- continue\n+ # Support cookies =value or plain value with no name\n+ # https://github.com/httpwg/http-extensions/issues/159\n+ if not sep:\n+ if not name:\n+ # Empty value like ;; or a cookie header with no value\n+ continue\n+ name, value = \"\", name\n \n if COOKIE_NAME_RESERVED_CHARS.search(name): # no cov\n continue\ndiff --git a/sanic/models/protocol_types.py b/sanic/models/protocol_types.py\n--- a/sanic/models/protocol_types.py\n+++ b/sanic/models/protocol_types.py\n@@ -3,7 +3,7 @@\n import sys\n \n from asyncio import BaseTransport\n-from typing import TYPE_CHECKING, Any, AnyStr, Optional\n+from typing import TYPE_CHECKING, Any, Optional, Union\n \n \n if TYPE_CHECKING:\n@@ -19,10 +19,10 @@\n from typing import Protocol\n \n class HTMLProtocol(Protocol):\n- def __html__(self) -> AnyStr:\n+ def __html__(self) -> Union[str, bytes]:\n ...\n \n- def _repr_html_(self) -> AnyStr:\n+ def _repr_html_(self) -> Union[str, bytes]:\n ...\n \n class Range(Protocol):\n", "issue": "Cookie totally breaks if the client sets a bare cookie\n### Is there an existing issue for this?\r\n\r\n- [X] I have searched the existing issues\r\n\r\n### Describe the bug\r\n\r\nA cookie may be not in the `key=value` format. For example. if the JS code runs `document.cookie = \"bad\"`, it becomes:\r\n\r\n\r\n\r\nI don't know how to call it. I will use the term \"bare cookie\" in this report. In the following requests with a bare cookie, the Cookie HTTP header becomes: `Cookie: key=value; bad`\r\n\r\n\r\n\r\nIt seems that Sanic cannot parse the header with bare cookies, and will throw all cookies (including the legimit `key=value` pair) away. See the code snippet below.\r\n\r\n### Code snippet\r\n\r\n```python\r\nfrom sanic import Sanic\r\nfrom sanic.response import html, text\r\n\r\napp = Sanic(\"test\")\r\napp.config.AUTO_EXTEND = False\r\n\r\[email protected](\"/\")\r\nasync def route1(request):\r\n return html('<script>document.cookie=\"key=value\"; document.cookie=\"bad\"; location.href=\"/fire\";</script>')\r\n \r\[email protected](\"/fire\")\r\nasync def route2(request):\r\n return text(f'''\r\n headers = {request.headers.get(\"Cookie\")}\r\n key = {request.cookies.get(\"key\", \"none\")}\r\n ''')\r\n\r\nif __name__ == '__main__':\r\n app.run(port=4321, debug=True)\r\n```\r\n\r\nThen visit `http://127.0.0.1:4321/` in Chrome. 
The page shows:\r\n\r\n```\r\nheaders = key=value; bad\r\nkey = none\r\n```\r\n\r\n### Expected Behavior\r\n\r\nThe page should show:\r\n\r\n```\r\nheaders = key=value; bad\r\nkey = value\r\n```\r\n\r\n### How do you run Sanic?\r\n\r\nAs a script (`app.run` or `Sanic.serve`)\r\n\r\n### Operating System\r\n\r\nWindows\r\n\r\n### Sanic Version\r\n\r\n22.12.0\r\n\r\n### Additional context\r\n\r\nI am using the latest stable Chrome (117.0.5938.150) to reproduce this.\r\n\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport sys\n\nfrom asyncio import BaseTransport\nfrom typing import TYPE_CHECKING, Any, AnyStr, Optional\n\n\nif TYPE_CHECKING:\n from sanic.http.constants import HTTP\n from sanic.models.asgi import ASGIScope\n\n\nif sys.version_info < (3, 8):\n Range = Any\n HTMLProtocol = Any\nelse:\n # Protocol is a 3.8+ feature\n from typing import Protocol\n\n class HTMLProtocol(Protocol):\n def __html__(self) -> AnyStr:\n ...\n\n def _repr_html_(self) -> AnyStr:\n ...\n\n class Range(Protocol):\n start: Optional[int]\n end: Optional[int]\n size: Optional[int]\n total: Optional[int]\n __slots__ = ()\n\n\nclass TransportProtocol(BaseTransport):\n scope: ASGIScope\n version: HTTP\n __slots__ = ()\n", "path": "sanic/models/protocol_types.py"}, {"content": "import re\n\nfrom typing import Any, Dict, List, Optional\n\nfrom sanic.cookies.response import Cookie\nfrom sanic.log import deprecation\nfrom sanic.request.parameters import RequestParameters\n\n\nCOOKIE_NAME_RESERVED_CHARS = re.compile(\n '[\\x00-\\x1F\\x7F-\\xFF()<>@,;:\\\\\\\\\"/[\\\\]?={} \\x09]'\n)\nOCTAL_PATTERN = re.compile(r\"\\\\[0-3][0-7][0-7]\")\nQUOTE_PATTERN = re.compile(r\"[\\\\].\")\n\n\ndef _unquote(str): # no cov\n if str is None or len(str) < 2:\n return str\n if str[0] != '\"' or str[-1] != '\"':\n return str\n\n str = str[1:-1]\n\n i = 0\n n = len(str)\n res = []\n while 0 <= i < n:\n o_match = OCTAL_PATTERN.search(str, i)\n q_match = QUOTE_PATTERN.search(str, i)\n if not o_match and not q_match:\n res.append(str[i:])\n break\n # else:\n j = k = -1\n if o_match:\n j = o_match.start(0)\n if q_match:\n k = q_match.start(0)\n if q_match and (not o_match or k < j):\n res.append(str[i:k])\n res.append(str[k + 1])\n i = k + 2\n else:\n res.append(str[i:j])\n res.append(chr(int(str[j + 1 : j + 4], 8))) # noqa: E203\n i = j + 4\n return \"\".join(res)\n\n\ndef parse_cookie(raw: str) -> Dict[str, List[str]]:\n \"\"\"Parses a raw cookie string into a dictionary.\n\n The function takes a raw cookie string (usually from HTTP headers) and\n returns a dictionary where each key is a cookie name and the value is a\n list of values for that cookie. 
The function handles quoted values and\n skips invalid cookie names.\n\n Args:\n raw (str): The raw cookie string to be parsed.\n\n Returns:\n Dict[str, List[str]]: A dictionary containing the cookie names as keys\n and a list of values for each cookie.\n\n Example:\n ```python\n raw = 'name1=value1; name2=\"value2\"; name3=value3'\n cookies = parse_cookie(raw)\n # cookies will be {'name1': ['value1'], 'name2': ['value2'], 'name3': ['value3']}\n ```\n \"\"\" # noqa: E501\n cookies: Dict[str, List[str]] = {}\n\n for token in raw.split(\";\"):\n name, __, value = token.partition(\"=\")\n name = name.strip()\n value = value.strip()\n\n if not name:\n continue\n\n if COOKIE_NAME_RESERVED_CHARS.search(name): # no cov\n continue\n\n if len(value) > 2 and value[0] == '\"' and value[-1] == '\"': # no cov\n value = _unquote(value)\n\n if name in cookies:\n cookies[name].append(value)\n else:\n cookies[name] = [value]\n\n return cookies\n\n\nclass CookieRequestParameters(RequestParameters):\n \"\"\"A container for accessing single and multiple cookie values.\n\n Because the HTTP standard allows for multiple cookies with the same name,\n a standard dictionary cannot be used to access cookie values. This class\n provides a way to access cookie values in a way that is similar to a\n dictionary, but also allows for accessing multiple values for a single\n cookie name when necessary.\n\n Args:\n cookies (Dict[str, List[str]]): A dictionary containing the cookie\n names as keys and a list of values for each cookie.\n\n Example:\n ```python\n raw = 'name1=value1; name2=\"value2\"; name3=value3'\n cookies = parse_cookie(raw)\n # cookies will be {'name1': ['value1'], 'name2': ['value2'], 'name3': ['value3']}\n\n request_cookies = CookieRequestParameters(cookies)\n request_cookies['name1'] # 'value1'\n request_cookies.get('name1') # 'value1'\n request_cookies.getlist('name1') # ['value1']\n ```\n \"\"\" # noqa: E501\n\n def __getitem__(self, key: str) -> Optional[str]:\n deprecation(\n f\"You are accessing cookie key '{key}', which is currently in \"\n \"compat mode returning a single cookie value. Starting in v24.3 \"\n \"accessing a cookie value like this will return a list of values. \"\n \"To avoid this behavior and continue accessing a single value, \"\n f\"please upgrade from request.cookies['{key}'] to \"\n f\"request.cookies.get('{key}'). See more details: \"\n \"https://sanic.dev/en/guide/release-notes/v23.3.html#request-cookies\", # noqa\n 24.3,\n )\n try:\n value = self._get_prefixed_cookie(key)\n except KeyError:\n value = super().__getitem__(key)\n return value[0]\n\n def __getattr__(self, key: str) -> str:\n if key.startswith(\"_\"):\n return self.__getattribute__(key)\n key = key.rstrip(\"_\").replace(\"_\", \"-\")\n return str(self.get(key, \"\"))\n\n def get(self, name: str, default: Optional[Any] = None) -> Optional[Any]:\n try:\n return self._get_prefixed_cookie(name)[0]\n except KeyError:\n return super().get(name, default)\n\n def getlist(\n self, name: str, default: Optional[Any] = None\n ) -> Optional[Any]:\n try:\n return self._get_prefixed_cookie(name)\n except KeyError:\n return super().getlist(name, default)\n\n def _get_prefixed_cookie(self, name: str) -> Any:\n getitem = super().__getitem__\n try:\n return getitem(f\"{Cookie.HOST_PREFIX}{name}\")\n except KeyError:\n return getitem(f\"{Cookie.SECURE_PREFIX}{name}\")\n", "path": "sanic/cookies/request.py"}]}
| 3,137 | 392 |
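The fix above amounts to tolerating name-less cookie tokens instead of discarding the whole header. A self-contained sketch of that parsing rule, standard library only (an illustration of the idea, not Sanic's actual module):

```python
from typing import Dict, List

def parse_cookie_header(raw: str) -> Dict[str, List[str]]:
    cookies: Dict[str, List[str]] = {}
    for token in raw.split(";"):
        name, sep, value = token.partition("=")
        name, value = name.strip(), value.strip()
        if not sep:              # bare token such as "bad": keep it under an empty name
            if not name:
                continue         # empty fragment, e.g. ";;"
            name, value = "", name
        cookies.setdefault(name, []).append(value)
    return cookies

print(parse_cookie_header("key=value; bad"))
# {'key': ['value'], '': ['bad']}
```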
gh_patches_debug_18990
|
rasdani/github-patches
|
git_diff
|
qutebrowser__qutebrowser-5916
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Port completion.models.listcategory to QRegularExpression
In `qutebrowser/completion/models/listcategory.py`, we use `QRegExp`, which is going to be removed in Qt 6:
```python
rx = QRegExp(val, Qt.CaseInsensitive)
self.setFilterRegExp(rx)
```
We should instead [port](https://doc-snapshots.qt.io/qt6-dev/qtcore-changes-qt6.html#regular-expression-classes) this to use [QRegularExpression](https://doc.qt.io/qt-5/qregularexpression.html) (or, perhaps, Python's `re`?).
~~(Also, we should probably call `qtutils.ensure_valid(rx)`)~~ (done in `master` already)
cc @rcorre
</issue>
<code>
[start of qutebrowser/completion/models/listcategory.py]
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2017-2020 Ryan Roden-Corrent (rcorre) <[email protected]>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """Completion category that uses a list of tuples as a data source."""
21
22 import re
23 from typing import Iterable, Tuple
24
25 from PyQt5.QtCore import Qt, QSortFilterProxyModel, QRegExp
26 from PyQt5.QtGui import QStandardItem, QStandardItemModel
27 from PyQt5.QtWidgets import QWidget
28
29 from qutebrowser.completion.models import util
30 from qutebrowser.utils import qtutils, log
31
32
33 class ListCategory(QSortFilterProxyModel):
34
35 """Expose a list of items as a category for the CompletionModel."""
36
37 def __init__(self,
38 name: str,
39 items: Iterable[Tuple[str, ...]],
40 sort: bool = True,
41 delete_func: util.DeleteFuncType = None,
42 parent: QWidget = None):
43 super().__init__(parent)
44 self.name = name
45 self.srcmodel = QStandardItemModel(parent=self)
46 self._pattern = ''
47 # ListCategory filters all columns
48 self.columns_to_filter = [0, 1, 2]
49 self.setFilterKeyColumn(-1)
50 for item in items:
51 self.srcmodel.appendRow([QStandardItem(x) for x in item])
52 self.setSourceModel(self.srcmodel)
53 self.delete_func = delete_func
54 self._sort = sort
55
56 def set_pattern(self, val):
57 """Setter for pattern.
58
59 Args:
60 val: The value to set.
61 """
62 self._pattern = val
63 val = re.sub(r' +', r' ', val) # See #1919
64 val = re.escape(val)
65 val = val.replace(r'\ ', '.*')
66 rx = QRegExp(val, Qt.CaseInsensitive)
67 qtutils.ensure_valid(rx)
68 self.setFilterRegExp(rx)
69 self.invalidate()
70 sortcol = 0
71 self.sort(sortcol)
72
73 def lessThan(self, lindex, rindex):
74 """Custom sorting implementation.
75
76 Prefers all items which start with self._pattern. Other than that, uses
77 normal Python string sorting.
78
79 Args:
80 lindex: The QModelIndex of the left item (*left* < right)
81 rindex: The QModelIndex of the right item (left < *right*)
82
83 Return:
84 True if left < right, else False
85 """
86 qtutils.ensure_valid(lindex)
87 qtutils.ensure_valid(rindex)
88
89 left = self.srcmodel.data(lindex)
90 right = self.srcmodel.data(rindex)
91
92 if left is None or right is None: # pragma: no cover
93 log.completion.warning("Got unexpected None value, "
94 "left={!r} right={!r} "
95 "lindex={!r} rindex={!r}"
96 .format(left, right, lindex, rindex))
97 return False
98
99 leftstart = left.startswith(self._pattern)
100 rightstart = right.startswith(self._pattern)
101
102 if leftstart and not rightstart:
103 return True
104 elif rightstart and not leftstart:
105 return False
106 elif self._sort:
107 return left < right
108 else:
109 return False
110
[end of qutebrowser/completion/models/listcategory.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/qutebrowser/completion/models/listcategory.py b/qutebrowser/completion/models/listcategory.py
--- a/qutebrowser/completion/models/listcategory.py
+++ b/qutebrowser/completion/models/listcategory.py
@@ -22,7 +22,7 @@
import re
from typing import Iterable, Tuple
-from PyQt5.QtCore import Qt, QSortFilterProxyModel, QRegExp
+from PyQt5.QtCore import QSortFilterProxyModel, QRegularExpression
from PyQt5.QtGui import QStandardItem, QStandardItemModel
from PyQt5.QtWidgets import QWidget
@@ -63,9 +63,9 @@
val = re.sub(r' +', r' ', val) # See #1919
val = re.escape(val)
val = val.replace(r'\ ', '.*')
- rx = QRegExp(val, Qt.CaseInsensitive)
+ rx = QRegularExpression(val, QRegularExpression.CaseInsensitiveOption)
qtutils.ensure_valid(rx)
- self.setFilterRegExp(rx)
+ self.setFilterRegularExpression(rx)
self.invalidate()
sortcol = 0
self.sort(sortcol)
|
{"golden_diff": "diff --git a/qutebrowser/completion/models/listcategory.py b/qutebrowser/completion/models/listcategory.py\n--- a/qutebrowser/completion/models/listcategory.py\n+++ b/qutebrowser/completion/models/listcategory.py\n@@ -22,7 +22,7 @@\n import re\n from typing import Iterable, Tuple\n \n-from PyQt5.QtCore import Qt, QSortFilterProxyModel, QRegExp\n+from PyQt5.QtCore import QSortFilterProxyModel, QRegularExpression\n from PyQt5.QtGui import QStandardItem, QStandardItemModel\n from PyQt5.QtWidgets import QWidget\n \n@@ -63,9 +63,9 @@\n val = re.sub(r' +', r' ', val) # See #1919\n val = re.escape(val)\n val = val.replace(r'\\ ', '.*')\n- rx = QRegExp(val, Qt.CaseInsensitive)\n+ rx = QRegularExpression(val, QRegularExpression.CaseInsensitiveOption)\n qtutils.ensure_valid(rx)\n- self.setFilterRegExp(rx)\n+ self.setFilterRegularExpression(rx)\n self.invalidate()\n sortcol = 0\n self.sort(sortcol)\n", "issue": "Port completion.models.listcategory to QRegularExpression\nIn `qutebrowser/completion/models/listcategory.py`, we use `QRegExp`, which is going to be removed in Qt 6:\r\n\r\n```python\r\n rx = QRegExp(val, Qt.CaseInsensitive)\r\n self.setFilterRegExp(rx)\r\n```\r\n\r\nWe should instead [port](https://doc-snapshots.qt.io/qt6-dev/qtcore-changes-qt6.html#regular-expression-classes) this to use [QRegularExpression](https://doc.qt.io/qt-5/qregularexpression.html) (or, perhaps, Python's `re`?) instead.\r\n\r\n~~(Also, we should probably call `qtutils.ensure_valid(rx)`)~~ (done in `master` already)\r\n\r\ncc @rcorre \n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2017-2020 Ryan Roden-Corrent (rcorre) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Completion category that uses a list of tuples as a data source.\"\"\"\n\nimport re\nfrom typing import Iterable, Tuple\n\nfrom PyQt5.QtCore import Qt, QSortFilterProxyModel, QRegExp\nfrom PyQt5.QtGui import QStandardItem, QStandardItemModel\nfrom PyQt5.QtWidgets import QWidget\n\nfrom qutebrowser.completion.models import util\nfrom qutebrowser.utils import qtutils, log\n\n\nclass ListCategory(QSortFilterProxyModel):\n\n \"\"\"Expose a list of items as a category for the CompletionModel.\"\"\"\n\n def __init__(self,\n name: str,\n items: Iterable[Tuple[str, ...]],\n sort: bool = True,\n delete_func: util.DeleteFuncType = None,\n parent: QWidget = None):\n super().__init__(parent)\n self.name = name\n self.srcmodel = QStandardItemModel(parent=self)\n self._pattern = ''\n # ListCategory filters all columns\n self.columns_to_filter = [0, 1, 2]\n self.setFilterKeyColumn(-1)\n for item in items:\n self.srcmodel.appendRow([QStandardItem(x) for x in item])\n self.setSourceModel(self.srcmodel)\n self.delete_func = delete_func\n self._sort = sort\n\n def set_pattern(self, val):\n \"\"\"Setter for pattern.\n\n Args:\n val: The value to set.\n \"\"\"\n self._pattern = val\n val = re.sub(r' +', r' ', val) # See #1919\n val = re.escape(val)\n val = val.replace(r'\\ ', '.*')\n rx = QRegExp(val, Qt.CaseInsensitive)\n qtutils.ensure_valid(rx)\n self.setFilterRegExp(rx)\n self.invalidate()\n sortcol = 0\n self.sort(sortcol)\n\n def lessThan(self, lindex, rindex):\n \"\"\"Custom sorting implementation.\n\n Prefers all items which start with self._pattern. Other than that, uses\n normal Python string sorting.\n\n Args:\n lindex: The QModelIndex of the left item (*left* < right)\n rindex: The QModelIndex of the right item (left < *right*)\n\n Return:\n True if left < right, else False\n \"\"\"\n qtutils.ensure_valid(lindex)\n qtutils.ensure_valid(rindex)\n\n left = self.srcmodel.data(lindex)\n right = self.srcmodel.data(rindex)\n\n if left is None or right is None: # pragma: no cover\n log.completion.warning(\"Got unexpected None value, \"\n \"left={!r} right={!r} \"\n \"lindex={!r} rindex={!r}\"\n .format(left, right, lindex, rindex))\n return False\n\n leftstart = left.startswith(self._pattern)\n rightstart = right.startswith(self._pattern)\n\n if leftstart and not rightstart:\n return True\n elif rightstart and not leftstart:\n return False\n elif self._sort:\n return left < right\n else:\n return False\n", "path": "qutebrowser/completion/models/listcategory.py"}]}
| 1,787 | 245 |
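The port above swaps `QRegExp` for `QRegularExpression`. A condensed sketch of just the filter-setting step, assuming PyQt5 >= 5.12 (where `setFilterRegularExpression` is available); the validity check stands in for `qtutils.ensure_valid`:

```python
from PyQt5.QtCore import QRegularExpression, QSortFilterProxyModel


def set_filter(proxy: QSortFilterProxyModel, pattern: str) -> None:
    rx = QRegularExpression(pattern, QRegularExpression.CaseInsensitiveOption)
    if not rx.isValid():                  # stand-in for qtutils.ensure_valid(rx)
        raise ValueError(rx.errorString())
    # Was: proxy.setFilterRegExp(QRegExp(pattern, Qt.CaseInsensitive))
    proxy.setFilterRegularExpression(rx)
    proxy.invalidate()
```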
gh_patches_debug_1842
|
rasdani/github-patches
|
git_diff
|
DistrictDataLabs__yellowbrick-1162
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pytest-runner is deprecated
pytest-runner is deprecated: https://github.com/pytest-dev/pytest-runner/#deprecation-notice
If I find time, then I can make a PR, but I thought I'd let you know in the meantime.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # setup
3 # Setup script for installing yellowbrick
4 #
5 # Author: Benjamin Bengfort
6 # Created: Wed May 18 14:33:26 2016 -0400
7 #
8 # Copyright (C) 2016 The scikit-yb developers
9 # For license information, see LICENSE.txt and NOTICE.md
10 #
11 # ID: setup.py [c4f3ba7] [email protected] $
12
13 """
14 Setup script for installing yellowbrick.
15 See http://bbengfort.github.io/programmer/2016/01/20/packaging-with-pypi.html
16 """
17
18 ##########################################################################
19 ## Imports
20 ##########################################################################
21
22 import os
23 import codecs
24
25 from setuptools import setup
26 from setuptools import find_packages
27
28 ##########################################################################
29 ## Package Information
30 ##########################################################################
31
32 ## Basic information
33 ## Basic information
34 NAME = "yellowbrick"
35 DESCRIPTION = "A suite of visual analysis and diagnostic tools for machine learning."
36 AUTHOR = "The scikit-yb developers"
37 EMAIL = "[email protected]"
38 MAINTAINER = "The scikit-yb developers"
39 LICENSE = "Apache 2"
40 REPOSITORY = "https://github.com/DistrictDataLabs/yellowbrick"
41 PACKAGE = "yellowbrick"
42 URL = "http://scikit-yb.org/"
43
44 ## Define the keywords
45 KEYWORDS = (
46 "visualization",
47 "machine learning",
48 "scikit-learn",
49 "matplotlib",
50 "data science",
51 )
52
53 ## Define the classifiers
54 ## See https://pypi.python.org/pypi?%3Aaction=list_classifiers
55 CLASSIFIERS = (
56 "Development Status :: 5 - Production/Stable",
57 "Intended Audience :: Developers",
58 "Intended Audience :: Science/Research",
59 "License :: OSI Approved :: Apache Software License",
60 "Natural Language :: English",
61 "Operating System :: OS Independent",
62 "Programming Language :: Python",
63 "Programming Language :: Python :: 3.5",
64 "Programming Language :: Python :: 3.6",
65 "Programming Language :: Python :: 3.7",
66 "Programming Language :: Python :: 3.8",
67 "Topic :: Software Development",
68 "Topic :: Software Development :: Libraries :: Python Modules",
69 "Topic :: Scientific/Engineering :: Visualization",
70 )
71
72 ## Important Paths
73 PROJECT = os.path.abspath(os.path.dirname(__file__))
74 REQUIRE_PATH = "requirements.txt"
75 VERSION_PATH = os.path.join(PACKAGE, "version.py")
76 PKG_DESCRIBE = "DESCRIPTION.md"
77
78 ## Directories to ignore in find_packages
79 EXCLUDES = (
80 "tests", "tests.*",
81 "bin",
82 "docs", "docs.*",
83 "fixtures",
84 "register",
85 "notebooks", "notebooks.*",
86 "examples", "examples.*",
87 "binder", "binder.*",
88 "paper",
89 )
90
91 ##########################################################################
92 ## Helper Functions
93 ##########################################################################
94
95
96 def read(*parts):
97 """
98 Assume UTF-8 encoding and return the contents of the file located at the
99 absolute path from the REPOSITORY joined with *parts.
100 """
101 with codecs.open(os.path.join(PROJECT, *parts), "rb", "utf-8") as f:
102 return f.read()
103
104
105 def get_version(path=VERSION_PATH):
106 """
107 Reads the python file defined in the VERSION_PATH to find the get_version
108 function, and executes it to ensure that it is loaded correctly. Separating
109 the version in this way ensures no additional code is executed.
110 """
111 namespace = {}
112 exec(read(path), namespace)
113 return namespace["get_version"](short=True)
114
115
116 def get_requires(path=REQUIRE_PATH):
117 """
118 Yields a generator of requirements as defined by the REQUIRE_PATH which
119 should point to a requirements.txt output by `pip freeze`.
120 """
121 for line in read(path).splitlines():
122 line = line.strip()
123 if line and not line.startswith("#"):
124 yield line
125
126
127 def get_description_type(path=PKG_DESCRIBE):
128 """
129 Returns the long_description_content_type based on the extension of the
130 package describe path (e.g. .txt, .rst, or .md).
131 """
132 _, ext = os.path.splitext(path)
133 return {".rst": "text/x-rst", ".txt": "text/plain", ".md": "text/markdown"}[ext]
134
135
136 ##########################################################################
137 ## Define the configuration
138 ##########################################################################
139
140 config = {
141 "name": NAME,
142 "version": get_version(),
143 "description": DESCRIPTION,
144 "long_description": read(PKG_DESCRIBE),
145 "long_description_content_type": get_description_type(PKG_DESCRIBE),
146 "classifiers": CLASSIFIERS,
147 "keywords": KEYWORDS,
148 "license": LICENSE,
149 "author": AUTHOR,
150 "author_email": EMAIL,
151 "url": URL,
152 "maintainer": MAINTAINER,
153 "maintainer_email": EMAIL,
154 "project_urls": {
155 "Documentation": URL,
156 "Download": "{}/tarball/v{}".format(REPOSITORY, get_version()),
157 "Source": REPOSITORY,
158 "Tracker": "{}/issues".format(REPOSITORY),
159 },
160 "download_url": "{}/tarball/v{}".format(REPOSITORY, get_version()),
161 "packages": find_packages(where=PROJECT, exclude=EXCLUDES),
162 "package_data": {"yellowbrick": ["datasets/manifest.json"]},
163 "zip_safe": False,
164 "entry_points": {"console_scripts": []},
165 "install_requires": list(get_requires()),
166 "python_requires": ">=3.4, <4",
167 "setup_requires": ["pytest-runner"],
168 "tests_require": ["pytest"],
169 }
170
171
172 ##########################################################################
173 ## Run setup script
174 ##########################################################################
175
176 if __name__ == "__main__":
177 setup(**config)
178
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -163,9 +163,7 @@
"zip_safe": False,
"entry_points": {"console_scripts": []},
"install_requires": list(get_requires()),
- "python_requires": ">=3.4, <4",
- "setup_requires": ["pytest-runner"],
- "tests_require": ["pytest"],
+ "python_requires": ">=3.4, <4"
}
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -163,9 +163,7 @@\n \"zip_safe\": False,\n \"entry_points\": {\"console_scripts\": []},\n \"install_requires\": list(get_requires()),\n- \"python_requires\": \">=3.4, <4\",\n- \"setup_requires\": [\"pytest-runner\"],\n- \"tests_require\": [\"pytest\"],\n+ \"python_requires\": \">=3.4, <4\"\n }\n", "issue": "pytest-runner is deprecated\npytest-runner is deprecated: https://github.com/pytest-dev/pytest-runner/#deprecation-notice\r\n\r\nIf I find time, then I can make a PR, but I thought I'd let you know in the meantime.\n", "before_files": [{"content": "#!/usr/bin/env python\n# setup\n# Setup script for installing yellowbrick\n#\n# Author: Benjamin Bengfort\n# Created: Wed May 18 14:33:26 2016 -0400\n#\n# Copyright (C) 2016 The scikit-yb developers\n# For license information, see LICENSE.txt and NOTICE.md\n#\n# ID: setup.py [c4f3ba7] [email protected] $\n\n\"\"\"\nSetup script for installing yellowbrick.\nSee http://bbengfort.github.io/programmer/2016/01/20/packaging-with-pypi.html\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport os\nimport codecs\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\n##########################################################################\n## Package Information\n##########################################################################\n\n## Basic information\n## Basic information\nNAME = \"yellowbrick\"\nDESCRIPTION = \"A suite of visual analysis and diagnostic tools for machine learning.\"\nAUTHOR = \"The scikit-yb developers\"\nEMAIL = \"[email protected]\"\nMAINTAINER = \"The scikit-yb developers\"\nLICENSE = \"Apache 2\"\nREPOSITORY = \"https://github.com/DistrictDataLabs/yellowbrick\"\nPACKAGE = \"yellowbrick\"\nURL = \"http://scikit-yb.org/\"\n\n## Define the keywords\nKEYWORDS = (\n \"visualization\",\n \"machine learning\",\n \"scikit-learn\",\n \"matplotlib\",\n \"data science\",\n)\n\n## Define the classifiers\n## See https://pypi.python.org/pypi?%3Aaction=list_classifiers\nCLASSIFIERS = (\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n)\n\n## Important Paths\nPROJECT = os.path.abspath(os.path.dirname(__file__))\nREQUIRE_PATH = \"requirements.txt\"\nVERSION_PATH = os.path.join(PACKAGE, \"version.py\")\nPKG_DESCRIBE = \"DESCRIPTION.md\"\n\n## Directories to ignore in find_packages\nEXCLUDES = (\n \"tests\", \"tests.*\",\n \"bin\",\n \"docs\", \"docs.*\",\n \"fixtures\",\n \"register\",\n \"notebooks\", \"notebooks.*\",\n \"examples\", \"examples.*\",\n \"binder\", \"binder.*\",\n \"paper\",\n)\n\n##########################################################################\n## Helper Functions\n##########################################################################\n\n\ndef read(*parts):\n \"\"\"\n Assume UTF-8 encoding 
and return the contents of the file located at the\n absolute path from the REPOSITORY joined with *parts.\n \"\"\"\n with codecs.open(os.path.join(PROJECT, *parts), \"rb\", \"utf-8\") as f:\n return f.read()\n\n\ndef get_version(path=VERSION_PATH):\n \"\"\"\n Reads the python file defined in the VERSION_PATH to find the get_version\n function, and executes it to ensure that it is loaded correctly. Separating\n the version in this way ensures no additional code is executed.\n \"\"\"\n namespace = {}\n exec(read(path), namespace)\n return namespace[\"get_version\"](short=True)\n\n\ndef get_requires(path=REQUIRE_PATH):\n \"\"\"\n Yields a generator of requirements as defined by the REQUIRE_PATH which\n should point to a requirements.txt output by `pip freeze`.\n \"\"\"\n for line in read(path).splitlines():\n line = line.strip()\n if line and not line.startswith(\"#\"):\n yield line\n\n\ndef get_description_type(path=PKG_DESCRIBE):\n \"\"\"\n Returns the long_description_content_type based on the extension of the\n package describe path (e.g. .txt, .rst, or .md).\n \"\"\"\n _, ext = os.path.splitext(path)\n return {\".rst\": \"text/x-rst\", \".txt\": \"text/plain\", \".md\": \"text/markdown\"}[ext]\n\n\n##########################################################################\n## Define the configuration\n##########################################################################\n\nconfig = {\n \"name\": NAME,\n \"version\": get_version(),\n \"description\": DESCRIPTION,\n \"long_description\": read(PKG_DESCRIBE),\n \"long_description_content_type\": get_description_type(PKG_DESCRIBE),\n \"classifiers\": CLASSIFIERS,\n \"keywords\": KEYWORDS,\n \"license\": LICENSE,\n \"author\": AUTHOR,\n \"author_email\": EMAIL,\n \"url\": URL,\n \"maintainer\": MAINTAINER,\n \"maintainer_email\": EMAIL,\n \"project_urls\": {\n \"Documentation\": URL,\n \"Download\": \"{}/tarball/v{}\".format(REPOSITORY, get_version()),\n \"Source\": REPOSITORY,\n \"Tracker\": \"{}/issues\".format(REPOSITORY),\n },\n \"download_url\": \"{}/tarball/v{}\".format(REPOSITORY, get_version()),\n \"packages\": find_packages(where=PROJECT, exclude=EXCLUDES),\n \"package_data\": {\"yellowbrick\": [\"datasets/manifest.json\"]},\n \"zip_safe\": False,\n \"entry_points\": {\"console_scripts\": []},\n \"install_requires\": list(get_requires()),\n \"python_requires\": \">=3.4, <4\",\n \"setup_requires\": [\"pytest-runner\"],\n \"tests_require\": [\"pytest\"],\n}\n\n\n##########################################################################\n## Run setup script\n##########################################################################\n\nif __name__ == \"__main__\":\n setup(**config)\n", "path": "setup.py"}]}
| 2,256 | 111 |
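The change above simply drops the deprecated `pytest-runner` hooks from `setup()`. A hedged sketch of the trimmed config afterwards — the `tests` extra shown is an illustrative option, not necessarily what yellowbrick ships:

```python
config = {
    # ... metadata and packaging keys unchanged ...
    "python_requires": ">=3.4, <4",
    # removed: "setup_requires": ["pytest-runner"],
    # removed: "tests_require": ["pytest"],
    # illustrative replacement so `pip install -e .[tests]` pulls in pytest:
    "extras_require": {"tests": ["pytest"]},
}
```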
gh_patches_debug_36242
|
rasdani/github-patches
|
git_diff
|
pytorch__ignite-1756
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve `ignite.contrib.metrics` implementation
For these metrics in `ignite.contrib.metrics`:
- [Average Precision](https://github.com/pytorch/ignite/blob/master/ignite/contrib/metrics/average_precision.py)
- [Precision Recall Curve](https://github.com/pytorch/ignite/blob/master/ignite/contrib/metrics/precision_recall_curve.py)
- [Roc Auc](https://github.com/pytorch/ignite/blob/master/ignite/contrib/metrics/roc_auc.py)
We need to make the implementation simpler, similar to the one in #1690. A new `device` arg will be added, and we need to add the necessary distributed tests as well. See here for the tests: https://github.com/pytorch/ignite/blob/master/tests/ignite/contrib/metrics/regression/test_canberra_metric.py#L99
</issue>
<code>
[start of ignite/contrib/metrics/average_precision.py]
1 from typing import Callable
2
3 import torch
4
5 from ignite.metrics import EpochMetric
6
7
8 def average_precision_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> float:
9 try:
10 from sklearn.metrics import average_precision_score
11 except ImportError:
12 raise RuntimeError("This contrib module requires sklearn to be installed.")
13
14 y_true = y_targets.numpy()
15 y_pred = y_preds.numpy()
16 return average_precision_score(y_true, y_pred)
17
18
19 class AveragePrecision(EpochMetric):
20 """Computes Average Precision accumulating predictions and the ground-truth during an epoch
21 and applying `sklearn.metrics.average_precision_score <https://scikit-learn.org/stable/modules/generated/
22 sklearn.metrics.average_precision_score.html#sklearn.metrics.average_precision_score>`_ .
23
24 Args:
25 output_transform: a callable that is used to transform the
26 :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
27 form expected by the metric. This can be useful if, for example, you have a multi-output model and
28 you want to compute the metric with respect to one of the outputs.
29 check_compute_fn: Default False. If True, `average_precision_score
30 <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html
31 #sklearn.metrics.average_precision_score>`_ is run on the first batch of data to ensure there are
32 no issues. User will be warned in case there are any issues computing the function.
33
34 AveragePrecision expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or
35 confidence values. To apply an activation to y_pred, use output_transform as shown below:
36
37 .. code-block:: python
38
39 def activated_output_transform(output):
40 y_pred, y = output
41 y_pred = torch.softmax(y_pred, dim=1)
42 return y_pred, y
43
44 avg_precision = AveragePrecision(activated_output_transform)
45
46 """
47
48 def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: bool = False) -> None:
49 super(AveragePrecision, self).__init__(
50 average_precision_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn
51 )
52
[end of ignite/contrib/metrics/average_precision.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ignite/contrib/metrics/average_precision.py b/ignite/contrib/metrics/average_precision.py
--- a/ignite/contrib/metrics/average_precision.py
+++ b/ignite/contrib/metrics/average_precision.py
@@ -1,4 +1,4 @@
-from typing import Callable
+from typing import Callable, Optional, Union
import torch
@@ -6,13 +6,10 @@
def average_precision_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> float:
- try:
- from sklearn.metrics import average_precision_score
- except ImportError:
- raise RuntimeError("This contrib module requires sklearn to be installed.")
+ from sklearn.metrics import average_precision_score
- y_true = y_targets.numpy()
- y_pred = y_preds.numpy()
+ y_true = y_targets.cpu().numpy()
+ y_pred = y_preds.cpu().numpy()
return average_precision_score(y_true, y_pred)
@@ -30,6 +27,7 @@
<https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html
#sklearn.metrics.average_precision_score>`_ is run on the first batch of data to ensure there are
no issues. User will be warned in case there are any issues computing the function.
+ device: optional device specification for internal storage.
AveragePrecision expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or
confidence values. To apply an activation to y_pred, use output_transform as shown below:
@@ -45,7 +43,21 @@
"""
- def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: bool = False) -> None:
+ def __init__(
+ self,
+ output_transform: Callable = lambda x: x,
+ check_compute_fn: bool = False,
+ device: Union[str, torch.device] = torch.device("cpu"),
+ ):
+
+ try:
+ from sklearn.metrics import average_precision_score
+ except ImportError:
+ raise RuntimeError("This contrib module requires sklearn to be installed.")
+
super(AveragePrecision, self).__init__(
- average_precision_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn
+ average_precision_compute_fn,
+ output_transform=output_transform,
+ check_compute_fn=check_compute_fn,
+ device=device,
)
|
{"golden_diff": "diff --git a/ignite/contrib/metrics/average_precision.py b/ignite/contrib/metrics/average_precision.py\n--- a/ignite/contrib/metrics/average_precision.py\n+++ b/ignite/contrib/metrics/average_precision.py\n@@ -1,4 +1,4 @@\n-from typing import Callable\n+from typing import Callable, Optional, Union\n \n import torch\n \n@@ -6,13 +6,10 @@\n \n \n def average_precision_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> float:\n- try:\n- from sklearn.metrics import average_precision_score\n- except ImportError:\n- raise RuntimeError(\"This contrib module requires sklearn to be installed.\")\n+ from sklearn.metrics import average_precision_score\n \n- y_true = y_targets.numpy()\n- y_pred = y_preds.numpy()\n+ y_true = y_targets.cpu().numpy()\n+ y_pred = y_preds.cpu().numpy()\n return average_precision_score(y_true, y_pred)\n \n \n@@ -30,6 +27,7 @@\n <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html\n #sklearn.metrics.average_precision_score>`_ is run on the first batch of data to ensure there are\n no issues. User will be warned in case there are any issues computing the function.\n+ device: optional device specification for internal storage.\n \n AveragePrecision expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or\n confidence values. To apply an activation to y_pred, use output_transform as shown below:\n@@ -45,7 +43,21 @@\n \n \"\"\"\n \n- def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: bool = False) -> None:\n+ def __init__(\n+ self,\n+ output_transform: Callable = lambda x: x,\n+ check_compute_fn: bool = False,\n+ device: Union[str, torch.device] = torch.device(\"cpu\"),\n+ ):\n+\n+ try:\n+ from sklearn.metrics import average_precision_score\n+ except ImportError:\n+ raise RuntimeError(\"This contrib module requires sklearn to be installed.\")\n+\n super(AveragePrecision, self).__init__(\n- average_precision_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn\n+ average_precision_compute_fn,\n+ output_transform=output_transform,\n+ check_compute_fn=check_compute_fn,\n+ device=device,\n )\n", "issue": "Improve `ignite.contirb.metrics` implementation \nFor these metrics in `ignite.contrib.metrics` :\r\n- [Average Precision](https://github.com/pytorch/ignite/blob/master/ignite/contrib/metrics/average_precision.py)\r\n- [Precision Recall Curve](https://github.com/pytorch/ignite/blob/master/ignite/contrib/metrics/precision_recall_curve.py)\r\n- [Roc Auc](https://github.com/pytorch/ignite/blob/master/ignite/contrib/metrics/roc_auc.py) \r\nWe need to make the implementation simpler. Similar to the one here #1690. New arg `device` will be added, and we need to add the necessary distributed tests as well. 
See here for the tests https://github.com/pytorch/ignite/blob/master/tests/ignite/contrib/metrics/regression/test_canberra_metric.py#L99\r\n\n", "before_files": [{"content": "from typing import Callable\n\nimport torch\n\nfrom ignite.metrics import EpochMetric\n\n\ndef average_precision_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> float:\n try:\n from sklearn.metrics import average_precision_score\n except ImportError:\n raise RuntimeError(\"This contrib module requires sklearn to be installed.\")\n\n y_true = y_targets.numpy()\n y_pred = y_preds.numpy()\n return average_precision_score(y_true, y_pred)\n\n\nclass AveragePrecision(EpochMetric):\n \"\"\"Computes Average Precision accumulating predictions and the ground-truth during an epoch\n and applying `sklearn.metrics.average_precision_score <https://scikit-learn.org/stable/modules/generated/\n sklearn.metrics.average_precision_score.html#sklearn.metrics.average_precision_score>`_ .\n\n Args:\n output_transform: a callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n check_compute_fn: Default False. If True, `average_precision_score\n <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html\n #sklearn.metrics.average_precision_score>`_ is run on the first batch of data to ensure there are\n no issues. User will be warned in case there are any issues computing the function.\n\n AveragePrecision expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or\n confidence values. To apply an activation to y_pred, use output_transform as shown below:\n\n .. code-block:: python\n\n def activated_output_transform(output):\n y_pred, y = output\n y_pred = torch.softmax(y_pred, dim=1)\n return y_pred, y\n\n avg_precision = AveragePrecision(activated_output_transform)\n\n \"\"\"\n\n def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: bool = False) -> None:\n super(AveragePrecision, self).__init__(\n average_precision_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn\n )\n", "path": "ignite/contrib/metrics/average_precision.py"}]}
| 1,301 | 533 |
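Condensed, the pattern in the diff above is: let the compute function move tensors to CPU itself and forward a `device` argument to `EpochMetric`. A sketch assuming torch, scikit-learn and pytorch-ignite >= 0.4 are installed:

```python
import torch

from ignite.metrics import EpochMetric


def ap_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> float:
    # Import locally so the metric can raise a clear error if sklearn is missing.
    from sklearn.metrics import average_precision_score

    return average_precision_score(y_targets.cpu().numpy(), y_preds.cpu().numpy())


class AveragePrecision(EpochMetric):
    def __init__(self, output_transform=lambda x: x, check_compute_fn=False,
                 device=torch.device("cpu")):
        super().__init__(ap_compute_fn, output_transform=output_transform,
                         check_compute_fn=check_compute_fn, device=device)
```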
gh_patches_debug_24410
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-977
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`minimum_pre_commit_version` at the top level in configuration
Similar to the one that's currently allowed on individual hooks, allow this at the top level of the `.pre-commit-config.yaml`
</issue>
<code>
[start of pre_commit/clientlib.py]
1 from __future__ import absolute_import
2 from __future__ import unicode_literals
3
4 import argparse
5 import functools
6 import pipes
7 import sys
8
9 import cfgv
10 from aspy.yaml import ordered_load
11 from identify.identify import ALL_TAGS
12
13 import pre_commit.constants as C
14 from pre_commit.error_handler import FatalError
15 from pre_commit.languages.all import all_languages
16
17
18 def check_type_tag(tag):
19 if tag not in ALL_TAGS:
20 raise cfgv.ValidationError(
21 'Type tag {!r} is not recognized. '
22 'Try upgrading identify and pre-commit?'.format(tag),
23 )
24
25
26 def _make_argparser(filenames_help):
27 parser = argparse.ArgumentParser()
28 parser.add_argument('filenames', nargs='*', help=filenames_help)
29 parser.add_argument('-V', '--version', action='version', version=C.VERSION)
30 return parser
31
32
33 MANIFEST_HOOK_DICT = cfgv.Map(
34 'Hook', 'id',
35
36 cfgv.Required('id', cfgv.check_string),
37 cfgv.Required('name', cfgv.check_string),
38 cfgv.Required('entry', cfgv.check_string),
39 cfgv.Required('language', cfgv.check_one_of(all_languages)),
40 cfgv.Optional('alias', cfgv.check_string, ''),
41
42 cfgv.Optional(
43 'files', cfgv.check_and(cfgv.check_string, cfgv.check_regex), '',
44 ),
45 cfgv.Optional(
46 'exclude', cfgv.check_and(cfgv.check_string, cfgv.check_regex), '^$',
47 ),
48 cfgv.Optional('types', cfgv.check_array(check_type_tag), ['file']),
49 cfgv.Optional('exclude_types', cfgv.check_array(check_type_tag), []),
50
51 cfgv.Optional(
52 'additional_dependencies', cfgv.check_array(cfgv.check_string), [],
53 ),
54 cfgv.Optional('args', cfgv.check_array(cfgv.check_string), []),
55 cfgv.Optional('always_run', cfgv.check_bool, False),
56 cfgv.Optional('pass_filenames', cfgv.check_bool, True),
57 cfgv.Optional('description', cfgv.check_string, ''),
58 cfgv.Optional('language_version', cfgv.check_string, C.DEFAULT),
59 cfgv.Optional('log_file', cfgv.check_string, ''),
60 cfgv.Optional('minimum_pre_commit_version', cfgv.check_string, '0'),
61 cfgv.Optional('require_serial', cfgv.check_bool, False),
62 cfgv.Optional('stages', cfgv.check_array(cfgv.check_one_of(C.STAGES)), []),
63 cfgv.Optional('verbose', cfgv.check_bool, False),
64 )
65 MANIFEST_SCHEMA = cfgv.Array(MANIFEST_HOOK_DICT)
66
67
68 class InvalidManifestError(FatalError):
69 pass
70
71
72 load_manifest = functools.partial(
73 cfgv.load_from_filename,
74 schema=MANIFEST_SCHEMA,
75 load_strategy=ordered_load,
76 exc_tp=InvalidManifestError,
77 )
78
79
80 def validate_manifest_main(argv=None):
81 parser = _make_argparser('Manifest filenames.')
82 args = parser.parse_args(argv)
83 ret = 0
84 for filename in args.filenames:
85 try:
86 load_manifest(filename)
87 except InvalidManifestError as e:
88 print(e)
89 ret = 1
90 return ret
91
92
93 LOCAL = 'local'
94 META = 'meta'
95
96
97 class MigrateShaToRev(object):
98 @staticmethod
99 def _cond(key):
100 return cfgv.Conditional(
101 key, cfgv.check_string,
102 condition_key='repo',
103 condition_value=cfgv.NotIn(LOCAL, META),
104 ensure_absent=True,
105 )
106
107 def check(self, dct):
108 if dct.get('repo') in {LOCAL, META}:
109 self._cond('rev').check(dct)
110 self._cond('sha').check(dct)
111 elif 'sha' in dct and 'rev' in dct:
112 raise cfgv.ValidationError('Cannot specify both sha and rev')
113 elif 'sha' in dct:
114 self._cond('sha').check(dct)
115 else:
116 self._cond('rev').check(dct)
117
118 def apply_default(self, dct):
119 if 'sha' in dct:
120 dct['rev'] = dct.pop('sha')
121
122 def remove_default(self, dct):
123 pass
124
125
126 def _entry(modname):
127 """the hook `entry` is passed through `shlex.split()` by the command
128 runner, so to prevent issues with spaces and backslashes (on Windows)
129 it must be quoted here.
130 """
131 return '{} -m pre_commit.meta_hooks.{}'.format(
132 pipes.quote(sys.executable), modname,
133 )
134
135
136 _meta = (
137 (
138 'check-hooks-apply', (
139 ('name', 'Check hooks apply to the repository'),
140 ('files', C.CONFIG_FILE),
141 ('entry', _entry('check_hooks_apply')),
142 ),
143 ),
144 (
145 'check-useless-excludes', (
146 ('name', 'Check for useless excludes'),
147 ('files', C.CONFIG_FILE),
148 ('entry', _entry('check_useless_excludes')),
149 ),
150 ),
151 (
152 'identity', (
153 ('name', 'identity'),
154 ('verbose', True),
155 ('entry', _entry('identity')),
156 ),
157 ),
158 )
159
160 META_HOOK_DICT = cfgv.Map(
161 'Hook', 'id',
162 cfgv.Required('id', cfgv.check_string),
163 cfgv.Required('id', cfgv.check_one_of(tuple(k for k, _ in _meta))),
164 # language must be system
165 cfgv.Optional('language', cfgv.check_one_of({'system'}), 'system'),
166 *([
167 # default to the hook definition for the meta hooks
168 cfgv.ConditionalOptional(key, cfgv.check_any, value, 'id', hook_id)
169 for hook_id, values in _meta
170 for key, value in values
171 ] + [
172 # default to the "manifest" parsing
173 cfgv.OptionalNoDefault(item.key, item.check_fn)
174 # these will always be defaulted above
175 if item.key in {'name', 'language', 'entry'} else
176 item
177 for item in MANIFEST_HOOK_DICT.items
178 ])
179 )
180 CONFIG_HOOK_DICT = cfgv.Map(
181 'Hook', 'id',
182
183 cfgv.Required('id', cfgv.check_string),
184
185 # All keys in manifest hook dict are valid in a config hook dict, but
186 # are optional.
187 # No defaults are provided here as the config is merged on top of the
188 # manifest.
189 *[
190 cfgv.OptionalNoDefault(item.key, item.check_fn)
191 for item in MANIFEST_HOOK_DICT.items
192 if item.key != 'id'
193 ]
194 )
195 CONFIG_REPO_DICT = cfgv.Map(
196 'Repository', 'repo',
197
198 cfgv.Required('repo', cfgv.check_string),
199
200 cfgv.ConditionalRecurse(
201 'hooks', cfgv.Array(CONFIG_HOOK_DICT),
202 'repo', cfgv.NotIn(LOCAL, META),
203 ),
204 cfgv.ConditionalRecurse(
205 'hooks', cfgv.Array(MANIFEST_HOOK_DICT),
206 'repo', LOCAL,
207 ),
208 cfgv.ConditionalRecurse(
209 'hooks', cfgv.Array(META_HOOK_DICT),
210 'repo', META,
211 ),
212
213 MigrateShaToRev(),
214 )
215 DEFAULT_LANGUAGE_VERSION = cfgv.Map(
216 'DefaultLanguageVersion', None,
217 cfgv.NoAdditionalKeys(all_languages),
218 *[cfgv.Optional(x, cfgv.check_string, C.DEFAULT) for x in all_languages]
219 )
220 CONFIG_SCHEMA = cfgv.Map(
221 'Config', None,
222
223 cfgv.RequiredRecurse('repos', cfgv.Array(CONFIG_REPO_DICT)),
224 cfgv.OptionalRecurse(
225 'default_language_version', DEFAULT_LANGUAGE_VERSION, {},
226 ),
227 cfgv.Optional(
228 'default_stages',
229 cfgv.check_array(cfgv.check_one_of(C.STAGES)),
230 C.STAGES,
231 ),
232 cfgv.Optional('exclude', cfgv.check_regex, '^$'),
233 cfgv.Optional('fail_fast', cfgv.check_bool, False),
234 )
235
236
237 class InvalidConfigError(FatalError):
238 pass
239
240
241 def ordered_load_normalize_legacy_config(contents):
242 data = ordered_load(contents)
243 if isinstance(data, list):
244 # TODO: Once happy, issue a deprecation warning and instructions
245 return {'repos': data}
246 else:
247 return data
248
249
250 load_config = functools.partial(
251 cfgv.load_from_filename,
252 schema=CONFIG_SCHEMA,
253 load_strategy=ordered_load_normalize_legacy_config,
254 exc_tp=InvalidConfigError,
255 )
256
257
258 def validate_config_main(argv=None):
259 parser = _make_argparser('Config filenames.')
260 args = parser.parse_args(argv)
261 ret = 0
262 for filename in args.filenames:
263 try:
264 load_config(filename)
265 except InvalidConfigError as e:
266 print(e)
267 ret = 1
268 return ret
269
[end of pre_commit/clientlib.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pre_commit/clientlib.py b/pre_commit/clientlib.py
--- a/pre_commit/clientlib.py
+++ b/pre_commit/clientlib.py
@@ -13,6 +13,7 @@
import pre_commit.constants as C
from pre_commit.error_handler import FatalError
from pre_commit.languages.all import all_languages
+from pre_commit.util import parse_version
def check_type_tag(tag):
@@ -23,6 +24,16 @@
)
+def check_min_version(version):
+ if parse_version(version) > parse_version(C.VERSION):
+ raise cfgv.ValidationError(
+ 'pre-commit version {} is required but version {} is installed. '
+ 'Perhaps run `pip install --upgrade pre-commit`.'.format(
+ version, C.VERSION,
+ ),
+ )
+
+
def _make_argparser(filenames_help):
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*', help=filenames_help)
@@ -231,6 +242,11 @@
),
cfgv.Optional('exclude', cfgv.check_regex, '^$'),
cfgv.Optional('fail_fast', cfgv.check_bool, False),
+ cfgv.Optional(
+ 'minimum_pre_commit_version',
+ cfgv.check_and(cfgv.check_string, check_min_version),
+ '0',
+ ),
)
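
For context, this patch adds a `check_min_version` validator and wires it to a new top-level `minimum_pre_commit_version` option, mirroring the per-hook option that already exists in `MANIFEST_HOOK_DICT`. Below is a minimal sketch of the intended behaviour outside the schema machinery; it is an illustration only, with `pkg_resources.parse_version` standing in for `pre_commit.util.parse_version` and a made-up installed version string standing in for `C.VERSION`.

```python
import cfgv
from pkg_resources import parse_version  # stand-in for pre_commit.util.parse_version

INSTALLED_VERSION = '1.10.0'  # stand-in for C.VERSION


def check_min_version(version):
    # Reject a config that requires a newer pre-commit than the one installed.
    if parse_version(version) > parse_version(INSTALLED_VERSION):
        raise cfgv.ValidationError(
            'pre-commit version {} is required but version {} is installed. '
            'Perhaps run `pip install --upgrade pre-commit`.'.format(
                version, INSTALLED_VERSION,
            ),
        )


check_min_version('0.15.0')    # passes: the requirement is satisfied
# check_min_version('99.0.0')  # would raise cfgv.ValidationError
```

With the schema change applied, a `.pre-commit-config.yaml` containing `minimum_pre_commit_version: '99.0.0'` fails `validate_config_main` with that message, while an older or equal requirement validates cleanly.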
|
{"golden_diff": "diff --git a/pre_commit/clientlib.py b/pre_commit/clientlib.py\n--- a/pre_commit/clientlib.py\n+++ b/pre_commit/clientlib.py\n@@ -13,6 +13,7 @@\n import pre_commit.constants as C\n from pre_commit.error_handler import FatalError\n from pre_commit.languages.all import all_languages\n+from pre_commit.util import parse_version\n \n \n def check_type_tag(tag):\n@@ -23,6 +24,16 @@\n )\n \n \n+def check_min_version(version):\n+ if parse_version(version) > parse_version(C.VERSION):\n+ raise cfgv.ValidationError(\n+ 'pre-commit version {} is required but version {} is installed. '\n+ 'Perhaps run `pip install --upgrade pre-commit`.'.format(\n+ version, C.VERSION,\n+ ),\n+ )\n+\n+\n def _make_argparser(filenames_help):\n parser = argparse.ArgumentParser()\n parser.add_argument('filenames', nargs='*', help=filenames_help)\n@@ -231,6 +242,11 @@\n ),\n cfgv.Optional('exclude', cfgv.check_regex, '^$'),\n cfgv.Optional('fail_fast', cfgv.check_bool, False),\n+ cfgv.Optional(\n+ 'minimum_pre_commit_version',\n+ cfgv.check_and(cfgv.check_string, check_min_version),\n+ '0',\n+ ),\n )\n", "issue": "`minimum_pre_commit_version` at the top level in configuration\nSimilar to the one that's currently allowed on individual hooks, allow this at the top level of the `.pre-commit-config.yaml`\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport argparse\nimport functools\nimport pipes\nimport sys\n\nimport cfgv\nfrom aspy.yaml import ordered_load\nfrom identify.identify import ALL_TAGS\n\nimport pre_commit.constants as C\nfrom pre_commit.error_handler import FatalError\nfrom pre_commit.languages.all import all_languages\n\n\ndef check_type_tag(tag):\n if tag not in ALL_TAGS:\n raise cfgv.ValidationError(\n 'Type tag {!r} is not recognized. 
'\n 'Try upgrading identify and pre-commit?'.format(tag),\n )\n\n\ndef _make_argparser(filenames_help):\n parser = argparse.ArgumentParser()\n parser.add_argument('filenames', nargs='*', help=filenames_help)\n parser.add_argument('-V', '--version', action='version', version=C.VERSION)\n return parser\n\n\nMANIFEST_HOOK_DICT = cfgv.Map(\n 'Hook', 'id',\n\n cfgv.Required('id', cfgv.check_string),\n cfgv.Required('name', cfgv.check_string),\n cfgv.Required('entry', cfgv.check_string),\n cfgv.Required('language', cfgv.check_one_of(all_languages)),\n cfgv.Optional('alias', cfgv.check_string, ''),\n\n cfgv.Optional(\n 'files', cfgv.check_and(cfgv.check_string, cfgv.check_regex), '',\n ),\n cfgv.Optional(\n 'exclude', cfgv.check_and(cfgv.check_string, cfgv.check_regex), '^$',\n ),\n cfgv.Optional('types', cfgv.check_array(check_type_tag), ['file']),\n cfgv.Optional('exclude_types', cfgv.check_array(check_type_tag), []),\n\n cfgv.Optional(\n 'additional_dependencies', cfgv.check_array(cfgv.check_string), [],\n ),\n cfgv.Optional('args', cfgv.check_array(cfgv.check_string), []),\n cfgv.Optional('always_run', cfgv.check_bool, False),\n cfgv.Optional('pass_filenames', cfgv.check_bool, True),\n cfgv.Optional('description', cfgv.check_string, ''),\n cfgv.Optional('language_version', cfgv.check_string, C.DEFAULT),\n cfgv.Optional('log_file', cfgv.check_string, ''),\n cfgv.Optional('minimum_pre_commit_version', cfgv.check_string, '0'),\n cfgv.Optional('require_serial', cfgv.check_bool, False),\n cfgv.Optional('stages', cfgv.check_array(cfgv.check_one_of(C.STAGES)), []),\n cfgv.Optional('verbose', cfgv.check_bool, False),\n)\nMANIFEST_SCHEMA = cfgv.Array(MANIFEST_HOOK_DICT)\n\n\nclass InvalidManifestError(FatalError):\n pass\n\n\nload_manifest = functools.partial(\n cfgv.load_from_filename,\n schema=MANIFEST_SCHEMA,\n load_strategy=ordered_load,\n exc_tp=InvalidManifestError,\n)\n\n\ndef validate_manifest_main(argv=None):\n parser = _make_argparser('Manifest filenames.')\n args = parser.parse_args(argv)\n ret = 0\n for filename in args.filenames:\n try:\n load_manifest(filename)\n except InvalidManifestError as e:\n print(e)\n ret = 1\n return ret\n\n\nLOCAL = 'local'\nMETA = 'meta'\n\n\nclass MigrateShaToRev(object):\n @staticmethod\n def _cond(key):\n return cfgv.Conditional(\n key, cfgv.check_string,\n condition_key='repo',\n condition_value=cfgv.NotIn(LOCAL, META),\n ensure_absent=True,\n )\n\n def check(self, dct):\n if dct.get('repo') in {LOCAL, META}:\n self._cond('rev').check(dct)\n self._cond('sha').check(dct)\n elif 'sha' in dct and 'rev' in dct:\n raise cfgv.ValidationError('Cannot specify both sha and rev')\n elif 'sha' in dct:\n self._cond('sha').check(dct)\n else:\n self._cond('rev').check(dct)\n\n def apply_default(self, dct):\n if 'sha' in dct:\n dct['rev'] = dct.pop('sha')\n\n def remove_default(self, dct):\n pass\n\n\ndef _entry(modname):\n \"\"\"the hook `entry` is passed through `shlex.split()` by the command\n runner, so to prevent issues with spaces and backslashes (on Windows)\n it must be quoted here.\n \"\"\"\n return '{} -m pre_commit.meta_hooks.{}'.format(\n pipes.quote(sys.executable), modname,\n )\n\n\n_meta = (\n (\n 'check-hooks-apply', (\n ('name', 'Check hooks apply to the repository'),\n ('files', C.CONFIG_FILE),\n ('entry', _entry('check_hooks_apply')),\n ),\n ),\n (\n 'check-useless-excludes', (\n ('name', 'Check for useless excludes'),\n ('files', C.CONFIG_FILE),\n ('entry', _entry('check_useless_excludes')),\n ),\n ),\n (\n 'identity', (\n ('name', 'identity'),\n 
('verbose', True),\n ('entry', _entry('identity')),\n ),\n ),\n)\n\nMETA_HOOK_DICT = cfgv.Map(\n 'Hook', 'id',\n cfgv.Required('id', cfgv.check_string),\n cfgv.Required('id', cfgv.check_one_of(tuple(k for k, _ in _meta))),\n # language must be system\n cfgv.Optional('language', cfgv.check_one_of({'system'}), 'system'),\n *([\n # default to the hook definition for the meta hooks\n cfgv.ConditionalOptional(key, cfgv.check_any, value, 'id', hook_id)\n for hook_id, values in _meta\n for key, value in values\n ] + [\n # default to the \"manifest\" parsing\n cfgv.OptionalNoDefault(item.key, item.check_fn)\n # these will always be defaulted above\n if item.key in {'name', 'language', 'entry'} else\n item\n for item in MANIFEST_HOOK_DICT.items\n ])\n)\nCONFIG_HOOK_DICT = cfgv.Map(\n 'Hook', 'id',\n\n cfgv.Required('id', cfgv.check_string),\n\n # All keys in manifest hook dict are valid in a config hook dict, but\n # are optional.\n # No defaults are provided here as the config is merged on top of the\n # manifest.\n *[\n cfgv.OptionalNoDefault(item.key, item.check_fn)\n for item in MANIFEST_HOOK_DICT.items\n if item.key != 'id'\n ]\n)\nCONFIG_REPO_DICT = cfgv.Map(\n 'Repository', 'repo',\n\n cfgv.Required('repo', cfgv.check_string),\n\n cfgv.ConditionalRecurse(\n 'hooks', cfgv.Array(CONFIG_HOOK_DICT),\n 'repo', cfgv.NotIn(LOCAL, META),\n ),\n cfgv.ConditionalRecurse(\n 'hooks', cfgv.Array(MANIFEST_HOOK_DICT),\n 'repo', LOCAL,\n ),\n cfgv.ConditionalRecurse(\n 'hooks', cfgv.Array(META_HOOK_DICT),\n 'repo', META,\n ),\n\n MigrateShaToRev(),\n)\nDEFAULT_LANGUAGE_VERSION = cfgv.Map(\n 'DefaultLanguageVersion', None,\n cfgv.NoAdditionalKeys(all_languages),\n *[cfgv.Optional(x, cfgv.check_string, C.DEFAULT) for x in all_languages]\n)\nCONFIG_SCHEMA = cfgv.Map(\n 'Config', None,\n\n cfgv.RequiredRecurse('repos', cfgv.Array(CONFIG_REPO_DICT)),\n cfgv.OptionalRecurse(\n 'default_language_version', DEFAULT_LANGUAGE_VERSION, {},\n ),\n cfgv.Optional(\n 'default_stages',\n cfgv.check_array(cfgv.check_one_of(C.STAGES)),\n C.STAGES,\n ),\n cfgv.Optional('exclude', cfgv.check_regex, '^$'),\n cfgv.Optional('fail_fast', cfgv.check_bool, False),\n)\n\n\nclass InvalidConfigError(FatalError):\n pass\n\n\ndef ordered_load_normalize_legacy_config(contents):\n data = ordered_load(contents)\n if isinstance(data, list):\n # TODO: Once happy, issue a deprecation warning and instructions\n return {'repos': data}\n else:\n return data\n\n\nload_config = functools.partial(\n cfgv.load_from_filename,\n schema=CONFIG_SCHEMA,\n load_strategy=ordered_load_normalize_legacy_config,\n exc_tp=InvalidConfigError,\n)\n\n\ndef validate_config_main(argv=None):\n parser = _make_argparser('Config filenames.')\n args = parser.parse_args(argv)\n ret = 0\n for filename in args.filenames:\n try:\n load_config(filename)\n except InvalidConfigError as e:\n print(e)\n ret = 1\n return ret\n", "path": "pre_commit/clientlib.py"}]}
| 3,153 | 293 |
gh_patches_debug_37409 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-2875 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider tacocabana is broken
During the global build at 2021-05-26-14-42-23, spider **tacocabana** failed with **0 features** and **1 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tacocabana.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tacocabana.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tacocabana.geojson))
</issue>
<code>
[start of locations/spiders/tacocabana.py]
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4 import re
5
6 from locations.items import GeojsonPointItem
7
8 class TacocabanaSpider(scrapy.Spider):
9 name = "tacocabana"
10 item_attributes = { 'brand': "Taco Cabana" }
11 allowed_domains = ["www.tacocabana.com"]
12 start_urls = (
13 "http://www.tacocabana.com/wp-admin/admin-ajax.php?action=get_ajax_processor&processor=get-locations&queryType=&postID=816",
14 )
15
16 def parse(self, response):
17 data = json.loads(re.sub(r"\s<.*?>.*<.*?>\s", "", response.body_as_unicode()))
18
19 for store in data:
20 properties = {
21 "phone" : store["phone_number"],
22 "ref" : str(store["locator_store_number"]),
23 "name" : store["post_title"],
24 "opening_hours" : store["hours"],
25 "website" : store["permalink"],
26 "lat" : store["x_coordinate"],
27 "lon" : store["y_coordinate"],
28 "street" : store["street_address_1"] + store["street_address_2"],
29 "city" : store["city"],
30 "state" : store["state"],
31 "postcode" : store["zip_code"]
32 }
33
34 yield GeojsonPointItem(**properties)
35
36 else:
37 self.logger.info("No results")
38
[end of locations/spiders/tacocabana.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/locations/spiders/tacocabana.py b/locations/spiders/tacocabana.py
--- a/locations/spiders/tacocabana.py
+++ b/locations/spiders/tacocabana.py
@@ -1,37 +1,55 @@
# -*- coding: utf-8 -*-
import scrapy
-import json
-import re
from locations.items import GeojsonPointItem
+
class TacocabanaSpider(scrapy.Spider):
name = "tacocabana"
- item_attributes = { 'brand': "Taco Cabana" }
- allowed_domains = ["www.tacocabana.com"]
- start_urls = (
- "http://www.tacocabana.com/wp-admin/admin-ajax.php?action=get_ajax_processor&processor=get-locations&queryType=&postID=816",
- )
-
+ item_attributes = {"brand": "Taco Cabana"}
+ allowed_domains = ["api.koala.fuzzhq.com"]
+
+ def start_requests(self):
+ yield scrapy.http.JsonRequest(
+ "https://api.koala.fuzzhq.com/oauth/access_token",
+ data={
+ "client_id": "3nA4STkGif0fZGApqxMlVewy3h8HN6Fsy7jVOACP",
+ "client_secret": "8oBU5gWiNg04zYzz61hN3ETrTIzvmbGyeLCX0F1s",
+ "grant_type": "ordering_app_credentials",
+ "scope": "group:ordering_app",
+ },
+ callback=self.fetch_locations,
+ )
+
+ def fetch_locations(self, response):
+ self.access_token = response.json()["access_token"]
+ yield self.request(
+ "https://api.koala.fuzzhq.com/v1/ordering/store-locations/?include[]=operating_hours&include[]=attributes&per_page=50"
+ )
+
+ def request(self, url):
+ return scrapy.Request(
+ url, headers={"Authorization": f"Bearer {self.access_token}"}
+ )
+
def parse(self, response):
- data = json.loads(re.sub(r"\s<.*?>.*<.*?>\s", "", response.body_as_unicode()))
+ data = response.json()
- for store in data:
+ for store in data["data"]:
properties = {
- "phone" : store["phone_number"],
- "ref" : str(store["locator_store_number"]),
- "name" : store["post_title"],
- "opening_hours" : store["hours"],
- "website" : store["permalink"],
- "lat" : store["x_coordinate"],
- "lon" : store["y_coordinate"],
- "street" : store["street_address_1"] + store["street_address_2"],
- "city" : store["city"],
- "state" : store["state"],
- "postcode" : store["zip_code"]
+ "website": f'https://olo.tacocabana.com/menu/{store["slug"]}?showInfoModal=true',
+ "ref": store["brand_id"],
+ "lat": store["latitude"],
+ "lon": store["longitude"],
+ "addr_full": store["street_address"],
+ "city": store["city"],
+ "state": store["cached_data"]["state"],
+ "country": store["country"],
+ "postcode": store["zip_code"],
+ "phone": store["phone_number"],
}
-
yield GeojsonPointItem(**properties)
-
- else:
- self.logger.info("No results")
+
+ next_url = data["meta"]["pagination"]["links"]["next"]
+ if next_url:
+ yield self.request(next_url)
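
The rewrite drops the scraped WordPress admin-ajax endpoint (which no longer returns data, hence the 0 features) in favour of the Koala ordering API: the spider first exchanges client credentials for an OAuth bearer token, then walks the paginated store-locations endpoint via `meta.pagination.links.next`. A rough equivalent with plain `requests`, useful for checking the API by hand, is sketched below; it assumes the credentials embedded in the patch are still valid and that the response shape is unchanged.

```python
import requests

token = requests.post(
    'https://api.koala.fuzzhq.com/oauth/access_token',
    json={
        'client_id': '3nA4STkGif0fZGApqxMlVewy3h8HN6Fsy7jVOACP',
        'client_secret': '8oBU5gWiNg04zYzz61hN3ETrTIzvmbGyeLCX0F1s',
        'grant_type': 'ordering_app_credentials',
        'scope': 'group:ordering_app',
    },
).json()['access_token']

url = ('https://api.koala.fuzzhq.com/v1/ordering/store-locations/'
       '?include[]=operating_hours&include[]=attributes&per_page=50')
while url:
    page = requests.get(url, headers={'Authorization': f'Bearer {token}'}).json()
    for store in page['data']:
        print(store['brand_id'], store['city'], store['zip_code'])
    url = page['meta']['pagination']['links']['next']  # falsy on the last page
```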
|
{"golden_diff": "diff --git a/locations/spiders/tacocabana.py b/locations/spiders/tacocabana.py\n--- a/locations/spiders/tacocabana.py\n+++ b/locations/spiders/tacocabana.py\n@@ -1,37 +1,55 @@\n # -*- coding: utf-8 -*-\n import scrapy\n-import json\n-import re\n \n from locations.items import GeojsonPointItem\n \n+\n class TacocabanaSpider(scrapy.Spider):\n name = \"tacocabana\"\n- item_attributes = { 'brand': \"Taco Cabana\" }\n- allowed_domains = [\"www.tacocabana.com\"]\n- start_urls = (\n- \"http://www.tacocabana.com/wp-admin/admin-ajax.php?action=get_ajax_processor&processor=get-locations&queryType=&postID=816\",\n- )\n- \n+ item_attributes = {\"brand\": \"Taco Cabana\"}\n+ allowed_domains = [\"api.koala.fuzzhq.com\"]\n+\n+ def start_requests(self):\n+ yield scrapy.http.JsonRequest(\n+ \"https://api.koala.fuzzhq.com/oauth/access_token\",\n+ data={\n+ \"client_id\": \"3nA4STkGif0fZGApqxMlVewy3h8HN6Fsy7jVOACP\",\n+ \"client_secret\": \"8oBU5gWiNg04zYzz61hN3ETrTIzvmbGyeLCX0F1s\",\n+ \"grant_type\": \"ordering_app_credentials\",\n+ \"scope\": \"group:ordering_app\",\n+ },\n+ callback=self.fetch_locations,\n+ )\n+\n+ def fetch_locations(self, response):\n+ self.access_token = response.json()[\"access_token\"]\n+ yield self.request(\n+ \"https://api.koala.fuzzhq.com/v1/ordering/store-locations/?include[]=operating_hours&include[]=attributes&per_page=50\"\n+ )\n+\n+ def request(self, url):\n+ return scrapy.Request(\n+ url, headers={\"Authorization\": f\"Bearer {self.access_token}\"}\n+ )\n+\n def parse(self, response):\n- data = json.loads(re.sub(r\"\\s<.*?>.*<.*?>\\s\", \"\", response.body_as_unicode()))\n+ data = response.json()\n \n- for store in data:\n+ for store in data[\"data\"]:\n properties = {\n- \"phone\" : store[\"phone_number\"],\n- \"ref\" : str(store[\"locator_store_number\"]),\n- \"name\" : store[\"post_title\"],\n- \"opening_hours\" : store[\"hours\"],\n- \"website\" : store[\"permalink\"],\n- \"lat\" : store[\"x_coordinate\"],\n- \"lon\" : store[\"y_coordinate\"],\n- \"street\" : store[\"street_address_1\"] + store[\"street_address_2\"],\n- \"city\" : store[\"city\"],\n- \"state\" : store[\"state\"],\n- \"postcode\" : store[\"zip_code\"]\n+ \"website\": f'https://olo.tacocabana.com/menu/{store[\"slug\"]}?showInfoModal=true',\n+ \"ref\": store[\"brand_id\"],\n+ \"lat\": store[\"latitude\"],\n+ \"lon\": store[\"longitude\"],\n+ \"addr_full\": store[\"street_address\"],\n+ \"city\": store[\"city\"],\n+ \"state\": store[\"cached_data\"][\"state\"],\n+ \"country\": store[\"country\"],\n+ \"postcode\": store[\"zip_code\"],\n+ \"phone\": store[\"phone_number\"],\n }\n- \n yield GeojsonPointItem(**properties)\n- \n- else:\n- self.logger.info(\"No results\")\n+\n+ next_url = data[\"meta\"][\"pagination\"][\"links\"][\"next\"]\n+ if next_url:\n+ yield self.request(next_url)\n", "issue": "Spider tacocabana is broken\nDuring the global build at 2021-05-26-14-42-23, spider **tacocabana** failed with **0 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tacocabana.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tacocabana.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tacocabana.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport re\n\nfrom locations.items import GeojsonPointItem\n\nclass TacocabanaSpider(scrapy.Spider):\n name = \"tacocabana\"\n item_attributes = 
{ 'brand': \"Taco Cabana\" }\n allowed_domains = [\"www.tacocabana.com\"]\n start_urls = (\n \"http://www.tacocabana.com/wp-admin/admin-ajax.php?action=get_ajax_processor&processor=get-locations&queryType=&postID=816\",\n )\n \n def parse(self, response):\n data = json.loads(re.sub(r\"\\s<.*?>.*<.*?>\\s\", \"\", response.body_as_unicode()))\n\n for store in data:\n properties = {\n \"phone\" : store[\"phone_number\"],\n \"ref\" : str(store[\"locator_store_number\"]),\n \"name\" : store[\"post_title\"],\n \"opening_hours\" : store[\"hours\"],\n \"website\" : store[\"permalink\"],\n \"lat\" : store[\"x_coordinate\"],\n \"lon\" : store[\"y_coordinate\"],\n \"street\" : store[\"street_address_1\"] + store[\"street_address_2\"],\n \"city\" : store[\"city\"],\n \"state\" : store[\"state\"],\n \"postcode\" : store[\"zip_code\"]\n }\n \n yield GeojsonPointItem(**properties)\n \n else:\n self.logger.info(\"No results\")\n", "path": "locations/spiders/tacocabana.py"}]}
| 1,111 | 839 |
gh_patches_debug_11219 | rasdani/github-patches | git_diff | spyder-ide__spyder-6523 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError in jedi_plugin.py:102
I am getting the following error with the current master:
```
Response error:Traceback (most recent call last):
File "plugin_server.py", line 95, in run
response['result'] = func(*args, **kwargs)
File "/home/jitse/misc/spyder/spyder/utils/introspection/jedi_plugin.py", line 102, in get_info
elif call_def.doc and '(' in call_def.doc.splitlines()[0]:
AttributeError: 'Definition' object has no attribute 'doc'
```
This is almost certainly due to a mistake on my part in #6497 that was not found by the automatic tests, so I will take care of it shortly.
To trigger, press `Ctrl+I` when the cursor is on `end_callbacks` on line 119 of spyder/widgets/editor.py.
</issue>
<code>
[start of spyder/utils/introspection/jedi_plugin.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright © Spyder Project Contributors
4 # Licensed under the terms of the MIT License
5 # (see spyder/__init__.py for details)
6
7 """
8 Jedi Introspection Plugin
9 """
10 import re
11 import os.path as osp
12 import sys
13 import time
14
15 from spyder.config.base import debug_print
16 from spyder.utils import programs
17 from spyder.utils.debug import log_last_error, log_dt
18 from spyder.utils.dochelpers import getsignaturefromtext
19 from spyder.utils.introspection.manager import (
20 DEBUG_EDITOR, LOG_FILENAME, IntrospectionPlugin)
21 from spyder.utils.introspection.utils import (default_info_response,
22 get_parent_until)
23 from spyder.utils.introspection.manager import JEDI_REQVER
24
25 try:
26 import jedi
27 except ImportError:
28 jedi = None
29
30
31 class JediPlugin(IntrospectionPlugin):
32 """
33 Jedi based introspection plugin for jedi
34
35 Experimental Editor's code completion, go-to-definition and help
36 """
37
38 # ---- IntrospectionPlugin API --------------------------------------------
39 name = 'jedi'
40
41 def load_plugin(self):
42 """Load the Jedi introspection plugin"""
43 if not programs.is_module_installed('jedi', JEDI_REQVER):
44 raise ImportError('Requires Jedi %s' % JEDI_REQVER)
45 jedi.settings.case_insensitive_completion = False
46 for lib in ['numpy', 'matplotlib']:
47 jedi.preload_module(lib)
48
49 def get_completions(self, info):
50 """Return a list of (completion, type) tuples"""
51 completions = self.get_jedi_object('completions', info)
52 if DEBUG_EDITOR:
53 log_last_error(LOG_FILENAME, str("comp: " + str(completions)[:100]))
54 if completions is not None:
55 completions = [(c.name, c.type) for c in completions]
56 else:
57 completions = []
58 debug_print(str(completions)[:100])
59 return completions
60
61 def get_info(self, info):
62 """
63 Find the calltip and docs
64
65 Returns a dict like the following:
66 {'note': 'Function of numpy.core.numeric...',
67 'argspec': "(shape, dtype=None, order='C')'
68 'docstring': 'Return an array of given...'
69 'name': 'ones',
70 'calltip': 'ones(shape, dtype=None, order='C')'}
71 """
72 call_def = self.get_jedi_object('goto_definitions', info)
73
74 for cd in call_def:
75 docstring = cd.docstring()
76 if docstring and not docstring.rstrip().endswith(')'):
77 call_def = cd
78 break
79 else:
80 try:
81 call_def = call_def[0]
82 docstring = call_def.docstring()
83 except IndexError:
84 return default_info_response()
85
86 name = call_def.name
87 if name is None:
88 return default_info_response()
89
90 if call_def.module_path:
91 mod_name = get_parent_until(call_def.module_path)
92 else:
93 mod_name = None
94
95 if not mod_name:
96 mod_name = call_def.module_name
97
98 if docstring.startswith(name + '('):
99 calltip = getsignaturefromtext(docstring, name)
100 argspec = calltip[calltip.find('('):]
101 docstring = docstring[docstring.find(')') + 3:]
102 elif call_def.doc and '(' in call_def.doc.splitlines()[0]:
103 calltip = docstring.splitlines()[0]
104 name = docstring.split('(')[0]
105 docstring = docstring[docstring.find(')') + 3:]
106 argspec = calltip[calltip.find('('):]
107 else:
108 calltip = name + '(...)'
109 argspec = '()'
110
111 if call_def.type == 'module':
112 note = 'Module %s' % mod_name
113 argspec = ''
114 calltip = name
115 elif call_def.type == 'class':
116 note = 'Class in %s module' % mod_name
117 elif docstring.startswith('%s(self' % name):
118 class_name = call_def.full_name.split('.')[-2]
119 note = 'Method of %s class in %s module' % (
120 class_name.capitalize(), mod_name)
121 else:
122 note = '%s in %s module' % (call_def.type.capitalize(),
123 mod_name)
124
125 argspec = argspec.replace(' = ', '=')
126 calltip = calltip.replace(' = ', '=')
127 debug_print(call_def.name)
128
129 doc_info = dict(name=name, argspec=argspec,
130 note=note, docstring=docstring, calltip=calltip)
131 return doc_info
132
133 def get_definition(self, info):
134 """
135 Find a definition location using Jedi
136
137 Follows gotos until a definition is found, or it reaches a builtin
138 module. Falls back on token lookup if it is in an enaml file or does
139 not find a match
140 """
141 line, filename = info['line_num'], info['filename']
142 def_info, module_path, line_nr = None, None, None
143 gotos = self.get_jedi_object('goto_assignments', info)
144
145 if gotos:
146 def_info = self.get_definition_info(gotos[0])
147 if def_info and def_info['goto_next']:
148 defns = self.get_jedi_object('goto_definitions', info)
149 new_info = None
150 if defns:
151 new_info = self.get_definition_info(defns[0])
152 if new_info and not new_info['in_builtin']:
153 def_info = new_info
154 elif not def_info:
155 return
156
157 # handle builtins -> try and find the module
158 if def_info and def_info['in_builtin']:
159 module_path, line_nr = self.find_in_builtin(def_info)
160 elif def_info:
161 module_path = def_info['module_path']
162 line_nr = def_info['line_nr']
163
164 # Handle failures to find module_path and line_nr
165 if module_path == filename and line_nr == line:
166 return
167 elif module_path is None:
168 return
169
170 return module_path, line_nr
171
172 # ---- Private API -------------------------------------------------------
173
174 def get_jedi_object(self, func_name, info, use_filename=True):
175 """Call a desired function on a Jedi Script and return the result"""
176 if not jedi:
177 return
178 if DEBUG_EDITOR:
179 t0 = time.time()
180 # override IPython qt_loaders ImportDenier behavior
181 metas = sys.meta_path
182 for meta in metas:
183 if (meta.__class__.__name__ == 'ImportDenier'
184 and hasattr(meta, 'forbid')):
185 sys.meta_path.remove(meta)
186
187 if use_filename:
188 filename = info['filename']
189 else:
190 filename = None
191
192 try:
193 script = jedi.api.Script(info['source_code'], info['line_num'],
194 info['column'], filename,
195 sys_path=info['sys_path'])
196 func = getattr(script, func_name)
197 val = func()
198 except Exception as e:
199 val = None
200 debug_print('Jedi error (%s)' % func_name)
201 debug_print(str(e))
202 if DEBUG_EDITOR:
203 log_last_error(LOG_FILENAME, str(e))
204 if DEBUG_EDITOR:
205 log_dt(LOG_FILENAME, func_name, t0)
206 if not val and filename:
207 return self.get_jedi_object(func_name, info, False)
208 else:
209 return val
210
211 @staticmethod
212 def get_definition_info(defn):
213 """Extract definition information from the Jedi definition object"""
214 try:
215 module_path = defn.module_path
216 name = defn.name
217 if hasattr(defn, 'line_nr'):
218 line_nr = defn.line_nr
219 else:
220 line_nr = defn.line
221 description = defn.description
222 in_builtin = defn.in_builtin_module()
223 except Exception as e:
224 if DEBUG_EDITOR:
225 log_last_error(LOG_FILENAME, 'Get Defintion: %s' % e)
226 return None
227 pattern = r'class\s+{0}|def\s+{0}|self.{0}\s*=|{0}\s*='.format(name)
228 if not re.match(pattern, description):
229 goto_next = True
230 else:
231 goto_next = False
232 return dict(module_path=module_path, line_nr=line_nr,
233 description=description, name=name, in_builtin=in_builtin,
234 goto_next=goto_next)
235
236 def find_in_builtin(self, info):
237 """Find a definition in a builtin file"""
238 module_path = info['module_path']
239 line_nr = info['line_nr']
240
241 if module_path is None:
242 return None, None
243
244 ext = osp.splitext(info['module_path'])[1]
245 desc = info['description']
246 name = info['name']
247 if ext in self.python_like_exts() and (
248 desc.startswith('import ') or desc.startswith('from ')):
249 path = self.python_like_mod_finder(desc,
250 osp.dirname(module_path), name)
251 if path:
252 info['module_path'] = module_path = path
253 info['line_nr'] = line_nr = 1
254
255 if ext in self.all_editable_exts():
256 pattern = r'from.*\W{0}\W?.*c?import|import.*\W{0}'
257 if not re.match(pattern.format(info['name']), desc):
258 line_nr = self.get_definition_from_file(module_path, name,
259 line_nr)
260 if not line_nr:
261 module_path = None
262
263 if not ext in self.all_editable_exts():
264 line_nr = None
265
266 return module_path, line_nr
267
[end of spyder/utils/introspection/jedi_plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/spyder/utils/introspection/jedi_plugin.py b/spyder/utils/introspection/jedi_plugin.py
--- a/spyder/utils/introspection/jedi_plugin.py
+++ b/spyder/utils/introspection/jedi_plugin.py
@@ -99,7 +99,7 @@
calltip = getsignaturefromtext(docstring, name)
argspec = calltip[calltip.find('('):]
docstring = docstring[docstring.find(')') + 3:]
- elif call_def.doc and '(' in call_def.doc.splitlines()[0]:
+ elif docstring and '(' in docstring.splitlines()[0]:
calltip = docstring.splitlines()[0]
name = docstring.split('(')[0]
docstring = docstring[docstring.find(')') + 3:]
|
{"golden_diff": "diff --git a/spyder/utils/introspection/jedi_plugin.py b/spyder/utils/introspection/jedi_plugin.py\n--- a/spyder/utils/introspection/jedi_plugin.py\n+++ b/spyder/utils/introspection/jedi_plugin.py\n@@ -99,7 +99,7 @@\n calltip = getsignaturefromtext(docstring, name)\n argspec = calltip[calltip.find('('):]\n docstring = docstring[docstring.find(')') + 3:]\n- elif call_def.doc and '(' in call_def.doc.splitlines()[0]:\n+ elif docstring and '(' in docstring.splitlines()[0]:\n calltip = docstring.splitlines()[0]\n name = docstring.split('(')[0]\n docstring = docstring[docstring.find(')') + 3:]\n", "issue": "AttributeError in jedi_plugin.py:102\nI am getting the following error with the current master:\r\n```\r\nResponse error:Traceback (most recent call last):\r\n File \"plugin_server.py\", line 95, in run\r\n response['result'] = func(*args, **kwargs)\r\n File \"/home/jitse/misc/spyder/spyder/utils/introspection/jedi_plugin.py\", line 102, in get_info\r\n elif call_def.doc and '(' in call_def.doc.splitlines()[0]:\r\nAttributeError: 'Definition' object has no attribute 'doc'\r\n```\r\nThis is almost certainly due to a mistake on my part in #6497 that was not found by the automatic tests, so I will take care of it shortly.\r\n\r\nTo trigger, press `Ctrl+I` when the cursor is on `end_callbacks` on line 119 of spyder/widgets/editor.py .\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"\nJedi Introspection Plugin\n\"\"\"\nimport re\nimport os.path as osp\nimport sys\nimport time\n\nfrom spyder.config.base import debug_print\nfrom spyder.utils import programs\nfrom spyder.utils.debug import log_last_error, log_dt\nfrom spyder.utils.dochelpers import getsignaturefromtext\nfrom spyder.utils.introspection.manager import (\n DEBUG_EDITOR, LOG_FILENAME, IntrospectionPlugin)\nfrom spyder.utils.introspection.utils import (default_info_response,\n get_parent_until)\nfrom spyder.utils.introspection.manager import JEDI_REQVER\n\ntry:\n import jedi\nexcept ImportError:\n jedi = None\n\n\nclass JediPlugin(IntrospectionPlugin):\n \"\"\"\n Jedi based introspection plugin for jedi\n\n Experimental Editor's code completion, go-to-definition and help\n \"\"\"\n\n # ---- IntrospectionPlugin API --------------------------------------------\n name = 'jedi'\n\n def load_plugin(self):\n \"\"\"Load the Jedi introspection plugin\"\"\"\n if not programs.is_module_installed('jedi', JEDI_REQVER):\n raise ImportError('Requires Jedi %s' % JEDI_REQVER)\n jedi.settings.case_insensitive_completion = False\n for lib in ['numpy', 'matplotlib']:\n jedi.preload_module(lib)\n\n def get_completions(self, info):\n \"\"\"Return a list of (completion, type) tuples\"\"\"\n completions = self.get_jedi_object('completions', info)\n if DEBUG_EDITOR:\n log_last_error(LOG_FILENAME, str(\"comp: \" + str(completions)[:100]))\n if completions is not None:\n completions = [(c.name, c.type) for c in completions]\n else:\n completions = []\n debug_print(str(completions)[:100])\n return completions\n\n def get_info(self, info):\n \"\"\"\n Find the calltip and docs\n\n Returns a dict like the following:\n {'note': 'Function of numpy.core.numeric...',\n 'argspec': \"(shape, dtype=None, order='C')'\n 'docstring': 'Return an array of given...'\n 'name': 'ones',\n 'calltip': 'ones(shape, dtype=None, order='C')'}\n \"\"\"\n call_def = self.get_jedi_object('goto_definitions', info)\n\n for 
cd in call_def:\n docstring = cd.docstring()\n if docstring and not docstring.rstrip().endswith(')'):\n call_def = cd\n break\n else:\n try:\n call_def = call_def[0]\n docstring = call_def.docstring()\n except IndexError:\n return default_info_response()\n\n name = call_def.name\n if name is None:\n return default_info_response()\n\n if call_def.module_path:\n mod_name = get_parent_until(call_def.module_path)\n else:\n mod_name = None\n\n if not mod_name:\n mod_name = call_def.module_name\n\n if docstring.startswith(name + '('):\n calltip = getsignaturefromtext(docstring, name)\n argspec = calltip[calltip.find('('):]\n docstring = docstring[docstring.find(')') + 3:]\n elif call_def.doc and '(' in call_def.doc.splitlines()[0]:\n calltip = docstring.splitlines()[0]\n name = docstring.split('(')[0]\n docstring = docstring[docstring.find(')') + 3:]\n argspec = calltip[calltip.find('('):]\n else:\n calltip = name + '(...)'\n argspec = '()'\n\n if call_def.type == 'module':\n note = 'Module %s' % mod_name\n argspec = ''\n calltip = name\n elif call_def.type == 'class':\n note = 'Class in %s module' % mod_name\n elif docstring.startswith('%s(self' % name):\n class_name = call_def.full_name.split('.')[-2]\n note = 'Method of %s class in %s module' % (\n class_name.capitalize(), mod_name)\n else:\n note = '%s in %s module' % (call_def.type.capitalize(),\n mod_name)\n\n argspec = argspec.replace(' = ', '=')\n calltip = calltip.replace(' = ', '=')\n debug_print(call_def.name)\n\n doc_info = dict(name=name, argspec=argspec,\n note=note, docstring=docstring, calltip=calltip)\n return doc_info\n\n def get_definition(self, info):\n \"\"\"\n Find a definition location using Jedi\n\n Follows gotos until a definition is found, or it reaches a builtin\n module. Falls back on token lookup if it is in an enaml file or does\n not find a match\n \"\"\"\n line, filename = info['line_num'], info['filename']\n def_info, module_path, line_nr = None, None, None\n gotos = self.get_jedi_object('goto_assignments', info)\n\n if gotos:\n def_info = self.get_definition_info(gotos[0])\n if def_info and def_info['goto_next']:\n defns = self.get_jedi_object('goto_definitions', info)\n new_info = None\n if defns:\n new_info = self.get_definition_info(defns[0])\n if new_info and not new_info['in_builtin']:\n def_info = new_info\n elif not def_info:\n return\n\n # handle builtins -> try and find the module\n if def_info and def_info['in_builtin']:\n module_path, line_nr = self.find_in_builtin(def_info)\n elif def_info:\n module_path = def_info['module_path']\n line_nr = def_info['line_nr']\n\n # Handle failures to find module_path and line_nr\n if module_path == filename and line_nr == line:\n return\n elif module_path is None:\n return\n\n return module_path, line_nr\n\n # ---- Private API -------------------------------------------------------\n\n def get_jedi_object(self, func_name, info, use_filename=True):\n \"\"\"Call a desired function on a Jedi Script and return the result\"\"\"\n if not jedi:\n return\n if DEBUG_EDITOR:\n t0 = time.time()\n # override IPython qt_loaders ImportDenier behavior\n metas = sys.meta_path\n for meta in metas:\n if (meta.__class__.__name__ == 'ImportDenier'\n and hasattr(meta, 'forbid')):\n sys.meta_path.remove(meta)\n\n if use_filename:\n filename = info['filename']\n else:\n filename = None\n\n try:\n script = jedi.api.Script(info['source_code'], info['line_num'],\n info['column'], filename,\n sys_path=info['sys_path'])\n func = getattr(script, func_name)\n val = func()\n except Exception as 
e:\n val = None\n debug_print('Jedi error (%s)' % func_name)\n debug_print(str(e))\n if DEBUG_EDITOR:\n log_last_error(LOG_FILENAME, str(e))\n if DEBUG_EDITOR:\n log_dt(LOG_FILENAME, func_name, t0)\n if not val and filename:\n return self.get_jedi_object(func_name, info, False)\n else:\n return val\n\n @staticmethod\n def get_definition_info(defn):\n \"\"\"Extract definition information from the Jedi definition object\"\"\"\n try:\n module_path = defn.module_path\n name = defn.name\n if hasattr(defn, 'line_nr'):\n line_nr = defn.line_nr\n else:\n line_nr = defn.line\n description = defn.description\n in_builtin = defn.in_builtin_module()\n except Exception as e:\n if DEBUG_EDITOR:\n log_last_error(LOG_FILENAME, 'Get Defintion: %s' % e)\n return None\n pattern = r'class\\s+{0}|def\\s+{0}|self.{0}\\s*=|{0}\\s*='.format(name)\n if not re.match(pattern, description):\n goto_next = True\n else:\n goto_next = False\n return dict(module_path=module_path, line_nr=line_nr,\n description=description, name=name, in_builtin=in_builtin,\n goto_next=goto_next)\n\n def find_in_builtin(self, info):\n \"\"\"Find a definition in a builtin file\"\"\"\n module_path = info['module_path']\n line_nr = info['line_nr']\n\n if module_path is None:\n return None, None\n\n ext = osp.splitext(info['module_path'])[1]\n desc = info['description']\n name = info['name']\n if ext in self.python_like_exts() and (\n desc.startswith('import ') or desc.startswith('from ')):\n path = self.python_like_mod_finder(desc,\n osp.dirname(module_path), name)\n if path:\n info['module_path'] = module_path = path\n info['line_nr'] = line_nr = 1\n\n if ext in self.all_editable_exts():\n pattern = r'from.*\\W{0}\\W?.*c?import|import.*\\W{0}'\n if not re.match(pattern.format(info['name']), desc):\n line_nr = self.get_definition_from_file(module_path, name,\n line_nr)\n if not line_nr:\n module_path = None\n\n if not ext in self.all_editable_exts():\n line_nr = None\n\n return module_path, line_nr\n", "path": "spyder/utils/introspection/jedi_plugin.py"}]}
| 3,521 | 182 |
gh_patches_debug_2630 | rasdani/github-patches | git_diff | joke2k__faker-826 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pt_BR email not returning valid email addresses
When creating a fake Factory with the pt_BR locale, it does not return valid email addresses.
Example:
```
melocauã@bol.com.br
joã[email protected]
laví[email protected]
vitó[email protected]
```
</issue>
<code>
[start of faker/providers/internet/pt_BR/__init__.py]
1 # coding=utf-8
2 from __future__ import unicode_literals
3 from .. import Provider as InternetProvider
4
5
6 class Provider(InternetProvider):
7 safe_email_tlds = ('com', 'net', 'br', 'br')
8 free_email_domains = (
9 'gmail.com',
10 'hotmail.com',
11 'yahoo.com.br',
12 'uol.com.br',
13 'bol.com.br',
14 'ig.com.br')
15 tlds = ('com', 'com', 'com', 'net', 'org', 'br', 'br', 'br')
16
[end of faker/providers/internet/pt_BR/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/faker/providers/internet/pt_BR/__init__.py b/faker/providers/internet/pt_BR/__init__.py
--- a/faker/providers/internet/pt_BR/__init__.py
+++ b/faker/providers/internet/pt_BR/__init__.py
@@ -13,3 +13,11 @@
'bol.com.br',
'ig.com.br')
tlds = ('com', 'com', 'com', 'net', 'org', 'br', 'br', 'br')
+ replacements = (
+ ('à', 'a'), ('â', 'a'), ('ã', 'a'),
+ ('ç', 'c'),
+ ('é', 'e'), ('ê', 'e'),
+ ('í', 'i'),
+ ('ô', 'o'), ('ö', 'o'), ('õ', 'o'),
+ ('ú', 'u'),
+ )
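
The fix relies on the base internet provider consulting a locale's `replacements` tuple when it builds user names and email local parts, the same mechanism other locales (for example de_DE) use to transliterate umlauts, so accented Portuguese characters are mapped to plain ASCII before the address is assembled. A quick way to check the intended outcome is sketched below; the output is random, so the printed addresses will vary.

```python
from faker import Faker

fake = Faker('pt_BR')
for _ in range(5):
    email = fake.email()
    assert all(ord(ch) < 128 for ch in email), email  # no accented characters expected
    print(email)
```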
|
{"golden_diff": "diff --git a/faker/providers/internet/pt_BR/__init__.py b/faker/providers/internet/pt_BR/__init__.py\n--- a/faker/providers/internet/pt_BR/__init__.py\n+++ b/faker/providers/internet/pt_BR/__init__.py\n@@ -13,3 +13,11 @@\n 'bol.com.br',\n 'ig.com.br')\n tlds = ('com', 'com', 'com', 'net', 'org', 'br', 'br', 'br')\n+ replacements = (\n+ ('\u00e0', 'a'), ('\u00e2', 'a'), ('\u00e3', 'a'),\n+ ('\u00e7', 'c'),\n+ ('\u00e9', 'e'), ('\u00ea', 'e'),\n+ ('\u00ed', 'i'),\n+ ('\u00f4', 'o'), ('\u00f6', 'o'), ('\u00f5', 'o'),\n+ ('\u00fa', 'u'),\n+ )\n", "issue": "pt_BR email not returning valid email addresses\nWhen creating a fake Factory with the pt_BR it is not returning valid email addresses.\r\nExample:\r\n```\r\nmelocau\[email protected]\r\njo\[email protected]\r\nlav\[email protected]\r\nvit\[email protected]\r\n```\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\nfrom .. import Provider as InternetProvider\n\n\nclass Provider(InternetProvider):\n safe_email_tlds = ('com', 'net', 'br', 'br')\n free_email_domains = (\n 'gmail.com',\n 'hotmail.com',\n 'yahoo.com.br',\n 'uol.com.br',\n 'bol.com.br',\n 'ig.com.br')\n tlds = ('com', 'com', 'com', 'net', 'org', 'br', 'br', 'br')\n", "path": "faker/providers/internet/pt_BR/__init__.py"}]}
| 759 | 199 |
gh_patches_debug_19253 | rasdani/github-patches | git_diff | internetarchive__openlibrary-9114 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update Github Action "Recent Comment Slack Bot" to mark issues as Needs: Response
<!-- IMPORTANT: Before posting, be sure to redact or remove sensitive data, such as passwords, secret keys, session cookies, etc. -->
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
### Describe the problem that you'd like solved
<!-- A clear and concise description of what you want to happen. -->
```
def add_label_to_issues(ids):
owner = 'internetarchive'
repo = 'openlibrary'
labels_url = f"https://api.github.com/repos/{owner}/{repo}/issues/labels"
payload = {"labels": ["Needs: Response"]}
for issue_number in ids:
issue_labels_url = f"https://api.github.com/repos/{owner}/{repo}/issues/{issue_number}/labels"
response = requests.post(issue_labels_url, json=payload, headers=github_headers)
if __name__ == "__main__":
add_label_to_issues([9054, 9065])
```
The above should be added to https://github.com/internetarchive/openlibrary/blob/master/scripts/gh_scripts/issue_comment_bot.py#L255
### Proposal & Constraints
<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->
<!-- Which suggestions or requirements should be considered for how feature needs to appear or be implemented? -->
### Additional context
<!-- Add any other context or screenshots about the feature request here. -->
### Stakeholders
<!-- @ tag stakeholders of this bug -->
</issue>
<code>
[start of scripts/gh_scripts/issue_comment_bot.py]
1 #!/usr/bin/env python
2 """
3 Fetches Open Library GitHub issues that have been commented on
4 within some amount of time, in hours.
5
6 Writes links to each issue to given Slack channel.
7 """
8 import argparse
9 import errno
10 import os
11 import sys
12 import time
13
14 from datetime import datetime, timedelta
15 from typing import Any
16
17 import requests
18
19 # Maps lead label to GitHub username
20 lead_label_to_username = {
21 'Lead: @mekarpeles': 'mekarpeles',
22 'Lead: @cdrini': 'cdrini',
23 'Lead: @scottbarnes': 'scottbarnes',
24 'Lead: @seabelis': 'seabelis',
25 'Lead: @jimchamp': 'jimchamp',
26 }
27
28 # Maps GitHub username to Slack ID
29 username_to_slack_id = {
30 'mekarpeles': '<@mek>',
31 'cdrini': '<@cdrini>',
32 'scottbarnes': '<@U03MNR6T7FH>',
33 'seabelis': '<@UAHQ39ACT>',
34 'jimchamp': '<@U01ARTHG9EV>',
35 'hornc': '<@U0EUS8DV0>',
36 }
37
38 github_headers = {
39 'X-GitHub-Api-Version': '2022-11-28',
40 'Accept': 'application/vnd.github+json',
41 }
42
43
44 def fetch_issues(updated_since: str):
45 """
46 Fetches all GitHub issues that have been updated since the given date string and have at least one comment.
47
48 GitHub results are paginated. This functions appends each result to a list, and does so for all pages.
49 To keep API calls to a minimum, we request the maximum number of results per request (100 per page, as of writing).
50
51 Important: Updated issues need not have a recent comment. Update events include many other things, such as adding a
52 label to an issue, or moving an issue to a milestone. Issues returned by this function will require additional
53 processing in order to determine if they have recent comments.
54 """
55 # Make initial query for updated issues:
56 query = f'repo:internetarchive/openlibrary is:open is:issue comments:>0 updated:>{updated_since}'
57 p: dict[str, str | int] = {
58 'q': query,
59 'per_page': 100,
60 }
61 response = requests.get(
62 'https://api.github.com/search/issues', params=p, headers=github_headers
63 )
64 d = response.json()
65 results = d['items']
66
67 # Fetch additional updated issues, if any exist
68 def get_next_page(url: str):
69 """Returns list of issues and optional url for next page"""
70 resp = requests.get(url, headers=github_headers)
71 # Get issues
72 d = resp.json()
73 issues = d['items']
74 # Prepare url for next page
75 next = resp.links.get('next', {})
76 next_url = next.get('url', '')
77
78 return issues, next_url
79
80 links = response.links
81 next = links.get('next', {})
82 next_url = next.get('url', '')
83 while next_url:
84 # Make call with next link
85 issues, next_url = get_next_page(next_url)
86 results = results + issues
87
88 return results
89
90
91 def filter_issues(issues: list, since: datetime):
92 """
93 Returns list of issues that were not last responded to by staff.
94 Requires fetching the most recent comments for the given issues.
95 """
96 results = []
97
98 for i in issues:
99 # Fetch comments using URL from previous GitHub search results
100 comments_url = i.get('comments_url')
101 resp = requests.get(
102 comments_url, params={'per_page': 100}, headers=github_headers
103 )
104
105 # Ensure that we have the last page of comments
106 links = resp.links
107 last = links.get('last', {})
108 last_url = last.get('url', '')
109
110 if last_url:
111 resp = requests.get(last_url, headers=github_headers)
112
113 # Get last comment
114 comments = resp.json()
115 last_comment = comments[-1]
116
117 # Determine if last comment meets our criteria for Slack notifications
118 # First step: Ensure that the last comment was left after the given `since` datetime
119 created = datetime.fromisoformat(last_comment['created_at'])
120 # Removing timezone info to avoid TypeErrors, which occur when
121 # comparing a timezone-aware datetime with a timezone-naive datetime
122 created = created.replace(tzinfo=None)
123 if created > since:
124 # Next step: Determine if the last commenter is a staff member
125 last_commenter = last_comment['user']['login']
126 if last_commenter not in username_to_slack_id:
127 lead_label = find_lead_label(i.get('labels', []))
128 results.append(
129 {
130 'comment_url': last_comment['html_url'],
131 'commenter': last_commenter,
132 'issue_title': i['title'],
133 'lead_label': lead_label,
134 }
135 )
136
137 return results
138
139
140 def find_lead_label(labels: list[dict[str, Any]]) -> str:
141 """
142 Finds and returns the name of the first lead label found in the given list of GitHub labels.
143
144 Returns an empty string if no lead label is found
145 """
146 result = ''
147 for label in labels:
148 if label['name'].startswith('Lead:'):
149 result = label['name']
150 break
151
152 return result
153
154
155 def publish_digest(
156 issues: list[dict[str, str]],
157 slack_channel: str,
158 slack_token: str,
159 hours_passed: int,
160 ):
161 """
162 Creates a threaded Slack messaged containing a digest of recently commented GitHub issues.
163
164 Parent Slack message will say how many comments were left, and the timeframe. Each reply
165 will include a link to the comment, as well as additional information.
166 """
167 # Create the parent message
168 parent_thread_msg = (
169 f'{len(issues)} new GitHub comment(s) since {hours_passed} hour(s) ago'
170 )
171
172 response = requests.post(
173 'https://slack.com/api/chat.postMessage',
174 headers={
175 'Authorization': f"Bearer {slack_token}",
176 'Content-Type': 'application/json; charset=utf-8',
177 },
178 json={
179 'channel': slack_channel,
180 'text': parent_thread_msg,
181 },
182 )
183
184 if response.status_code != 200:
185 # XXX : Log this
186 print(f'Failed to send message to Slack. Status code: {response.status_code}')
187 # XXX : Add retry logic?
188 sys.exit(errno.ECOMM)
189
190 d = response.json()
191 # Store timestamp, which, along with the channel, uniquely identifies the parent thread
192 ts = d.get('ts')
193
194 def comment_on_thread(message: str):
195 """
196 Posts the given message as a reply to the parent message.
197 """
198 response = requests.post(
199 'https://slack.com/api/chat.postMessage',
200 headers={
201 'Authorization': f"Bearer {slack_token}",
202 'Content-Type': 'application/json; charset=utf-8',
203 },
204 json={
205 'channel': slack_channel,
206 'text': message,
207 'thread_ts': ts,
208 },
209 )
210 if response.status_code != 200:
211 # XXX : Check "ok" field for errors
212 # XXX : Log this
213 print(
214 f'Failed to POST slack message\n Status code: {response.status_code}\n Message: {message}'
215 )
216 # XXX : Retry logic?
217
218 for i in issues:
219 # Slack rate limit is roughly 1 request per second
220 time.sleep(1)
221
222 comment_url = i['comment_url']
223 issue_title = i['issue_title']
224 commenter = i['commenter']
225 message = f'<{comment_url}|Latest comment for: *{issue_title}*>\n'
226
227 username = lead_label_to_username.get(i['lead_label'], '')
228 slack_id = username_to_slack_id.get(username, '')
229 if slack_id:
230 message += f'Lead: {slack_id}\n'
231 elif i['lead_label']:
232 message += f'{i["lead_label"]}\n'
233 else:
234 message += 'Lead: N/A\n'
235
236 message += f'Commenter: *{commenter}*'
237 comment_on_thread(message)
238
239
240 def time_since(hours):
241 """Returns datetime and string representations of the current time, minus the given hour"""
242 now = datetime.now()
243 # XXX : Add a minute or two to the delta (to avoid dropping issues)?
244 since = now - timedelta(hours=hours)
245 return since, since.strftime('%Y-%m-%dT%H:%M:%S')
246
247
248 def start_job(args: argparse.Namespace):
249 """
250 Starts the new comment digest job.
251 """
252 since, date_string = time_since(args.hours)
253 issues = fetch_issues(date_string)
254
255 filtered_issues = filter_issues(issues, since)
256 publish_digest(filtered_issues, args.channel, args.slack_token, args.hours)
257 print('Digest posted to Slack.')
258
259
260 def _get_parser() -> argparse.ArgumentParser:
261 """
262 Creates and returns an ArgumentParser containing default values which were
263 read from the config file.
264 """
265 parser = argparse.ArgumentParser(description=__doc__)
266 parser.add_argument(
267 'hours',
268 help='Fetch issues that have been updated since this many hours ago',
269 type=int,
270 )
271 parser.add_argument(
272 'channel',
273 help="Issues will be published to this Slack channel",
274 type=str,
275 )
276 parser.add_argument(
277 'slack_token',
278 metavar='slack-token',
279 help='Slack auth token',
280 type=str,
281 )
282
283 return parser
284
285
286 if __name__ == '__main__':
287 # Process command-line arguments and starts the notification job
288 parser = _get_parser()
289 args = parser.parse_args()
290
291 # If found, add token to GitHub request headers:
292 github_token = os.environ.get('GITHUB_TOKEN', '')
293 if github_token:
294 github_headers['Authorization'] = f'Bearer {github_token}'
295 start_job(args)
296
[end of scripts/gh_scripts/issue_comment_bot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scripts/gh_scripts/issue_comment_bot.py b/scripts/gh_scripts/issue_comment_bot.py
--- a/scripts/gh_scripts/issue_comment_bot.py
+++ b/scripts/gh_scripts/issue_comment_bot.py
@@ -245,6 +245,16 @@
return since, since.strftime('%Y-%m-%dT%H:%M:%S')
+def add_label_to_issues(issues):
+ for issue in issues:
+ issue_labels_url = f"https://api.github.com/repos/internetarchive/openlibrary/issues/{issue['number']}/labels"
+ response = requests.post(
+ issue_labels_url,
+ json={"labels": ["Needs: Response"]},
+ headers=github_headers,
+ )
+
+
def start_job(args: argparse.Namespace):
"""
Starts the new comment digest job.
@@ -253,6 +263,7 @@
issues = fetch_issues(date_string)
filtered_issues = filter_issues(issues, since)
+ add_label_to_issues(filtered_issues)
publish_digest(filtered_issues, args.channel, args.slack_token, args.hours)
print('Digest posted to Slack.')
|
{"golden_diff": "diff --git a/scripts/gh_scripts/issue_comment_bot.py b/scripts/gh_scripts/issue_comment_bot.py\n--- a/scripts/gh_scripts/issue_comment_bot.py\n+++ b/scripts/gh_scripts/issue_comment_bot.py\n@@ -245,6 +245,16 @@\n return since, since.strftime('%Y-%m-%dT%H:%M:%S')\n \n \n+def add_label_to_issues(issues):\n+ for issue in issues:\n+ issue_labels_url = f\"https://api.github.com/repos/internetarchive/openlibrary/issues/{issue['number']}/labels\"\n+ response = requests.post(\n+ issue_labels_url,\n+ json={\"labels\": [\"Needs: Response\"]},\n+ headers=github_headers,\n+ )\n+\n+\n def start_job(args: argparse.Namespace):\n \"\"\"\n Starts the new comment digest job.\n@@ -253,6 +263,7 @@\n issues = fetch_issues(date_string)\n \n filtered_issues = filter_issues(issues, since)\n+ add_label_to_issues(filtered_issues)\n publish_digest(filtered_issues, args.channel, args.slack_token, args.hours)\n print('Digest posted to Slack.')\n", "issue": "Update Github Action \"Recent Comment Slack Bot\" to mark issues as Needs: Response \n<!-- IMPORTANT: Before posting, be sure to redact or remove sensitive data, such as passwords, secret keys, session cookies, etc. -->\r\n\r\n<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->\r\n\r\n### Describe the problem that you'd like solved\r\n<!-- A clear and concise description of what you want to happen. -->\r\n\r\n```\r\ndef add_label_to_issues(ids):\r\n owner = 'internetarchive'\r\n repo = 'openlibrary'\r\n labels_url = f\"https://api.github.com/repos/{owner}/{repo}/issues/labels\"\r\n payload = {\"labels\": [\"Needs: Response\"]}\r\n for issue_number in ids:\r\n issue_labels_url = f\"https://api.github.com/repos/{owner}/{repo}/issues/{issue_number}/labels\"\r\n response = requests.post(issue_labels_url, json=payload, headers=github_headers)\r\n\r\nif __name__ == \"__main__\":\r\n add_label_to_issues([9054, 9065])\r\n```\r\n\r\nto https://github.com/internetarchive/openlibrary/blob/master/scripts/gh_scripts/issue_comment_bot.py#L255\r\n\r\n### Proposal & Constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\n\r\n<!-- Which suggestions or requirements should be considered for how feature needs to appear or be implemented? -->\r\n\r\n### Additional context\r\n<!-- Add any other context or screenshots about the feature request here. 
-->\r\n\r\n### Stakeholders\r\n<!-- @ tag stakeholders of this bug -->\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nFetches Open Library GitHub issues that have been commented on\nwithin some amount of time, in hours.\n\nWrites links to each issue to given Slack channel.\n\"\"\"\nimport argparse\nimport errno\nimport os\nimport sys\nimport time\n\nfrom datetime import datetime, timedelta\nfrom typing import Any\n\nimport requests\n\n# Maps lead label to GitHub username\nlead_label_to_username = {\n 'Lead: @mekarpeles': 'mekarpeles',\n 'Lead: @cdrini': 'cdrini',\n 'Lead: @scottbarnes': 'scottbarnes',\n 'Lead: @seabelis': 'seabelis',\n 'Lead: @jimchamp': 'jimchamp',\n}\n\n# Maps GitHub username to Slack ID\nusername_to_slack_id = {\n 'mekarpeles': '<@mek>',\n 'cdrini': '<@cdrini>',\n 'scottbarnes': '<@U03MNR6T7FH>',\n 'seabelis': '<@UAHQ39ACT>',\n 'jimchamp': '<@U01ARTHG9EV>',\n 'hornc': '<@U0EUS8DV0>',\n}\n\ngithub_headers = {\n 'X-GitHub-Api-Version': '2022-11-28',\n 'Accept': 'application/vnd.github+json',\n}\n\n\ndef fetch_issues(updated_since: str):\n \"\"\"\n Fetches all GitHub issues that have been updated since the given date string and have at least one comment.\n\n GitHub results are paginated. This functions appends each result to a list, and does so for all pages.\n To keep API calls to a minimum, we request the maximum number of results per request (100 per page, as of writing).\n\n Important: Updated issues need not have a recent comment. Update events include many other things, such as adding a\n label to an issue, or moving an issue to a milestone. Issues returned by this function will require additional\n processing in order to determine if they have recent comments.\n \"\"\"\n # Make initial query for updated issues:\n query = f'repo:internetarchive/openlibrary is:open is:issue comments:>0 updated:>{updated_since}'\n p: dict[str, str | int] = {\n 'q': query,\n 'per_page': 100,\n }\n response = requests.get(\n 'https://api.github.com/search/issues', params=p, headers=github_headers\n )\n d = response.json()\n results = d['items']\n\n # Fetch additional updated issues, if any exist\n def get_next_page(url: str):\n \"\"\"Returns list of issues and optional url for next page\"\"\"\n resp = requests.get(url, headers=github_headers)\n # Get issues\n d = resp.json()\n issues = d['items']\n # Prepare url for next page\n next = resp.links.get('next', {})\n next_url = next.get('url', '')\n\n return issues, next_url\n\n links = response.links\n next = links.get('next', {})\n next_url = next.get('url', '')\n while next_url:\n # Make call with next link\n issues, next_url = get_next_page(next_url)\n results = results + issues\n\n return results\n\n\ndef filter_issues(issues: list, since: datetime):\n \"\"\"\n Returns list of issues that were not last responded to by staff.\n Requires fetching the most recent comments for the given issues.\n \"\"\"\n results = []\n\n for i in issues:\n # Fetch comments using URL from previous GitHub search results\n comments_url = i.get('comments_url')\n resp = requests.get(\n comments_url, params={'per_page': 100}, headers=github_headers\n )\n\n # Ensure that we have the last page of comments\n links = resp.links\n last = links.get('last', {})\n last_url = last.get('url', '')\n\n if last_url:\n resp = requests.get(last_url, headers=github_headers)\n\n # Get last comment\n comments = resp.json()\n last_comment = comments[-1]\n\n # Determine if last comment meets our criteria for Slack notifications\n # First step: Ensure 
that the last comment was left after the given `since` datetime\n created = datetime.fromisoformat(last_comment['created_at'])\n # Removing timezone info to avoid TypeErrors, which occur when\n # comparing a timezone-aware datetime with a timezone-naive datetime\n created = created.replace(tzinfo=None)\n if created > since:\n # Next step: Determine if the last commenter is a staff member\n last_commenter = last_comment['user']['login']\n if last_commenter not in username_to_slack_id:\n lead_label = find_lead_label(i.get('labels', []))\n results.append(\n {\n 'comment_url': last_comment['html_url'],\n 'commenter': last_commenter,\n 'issue_title': i['title'],\n 'lead_label': lead_label,\n }\n )\n\n return results\n\n\ndef find_lead_label(labels: list[dict[str, Any]]) -> str:\n \"\"\"\n Finds and returns the name of the first lead label found in the given list of GitHub labels.\n\n Returns an empty string if no lead label is found\n \"\"\"\n result = ''\n for label in labels:\n if label['name'].startswith('Lead:'):\n result = label['name']\n break\n\n return result\n\n\ndef publish_digest(\n issues: list[dict[str, str]],\n slack_channel: str,\n slack_token: str,\n hours_passed: int,\n):\n \"\"\"\n Creates a threaded Slack messaged containing a digest of recently commented GitHub issues.\n\n Parent Slack message will say how many comments were left, and the timeframe. Each reply\n will include a link to the comment, as well as additional information.\n \"\"\"\n # Create the parent message\n parent_thread_msg = (\n f'{len(issues)} new GitHub comment(s) since {hours_passed} hour(s) ago'\n )\n\n response = requests.post(\n 'https://slack.com/api/chat.postMessage',\n headers={\n 'Authorization': f\"Bearer {slack_token}\",\n 'Content-Type': 'application/json; charset=utf-8',\n },\n json={\n 'channel': slack_channel,\n 'text': parent_thread_msg,\n },\n )\n\n if response.status_code != 200:\n # XXX : Log this\n print(f'Failed to send message to Slack. 
Status code: {response.status_code}')\n # XXX : Add retry logic?\n sys.exit(errno.ECOMM)\n\n d = response.json()\n # Store timestamp, which, along with the channel, uniquely identifies the parent thread\n ts = d.get('ts')\n\n def comment_on_thread(message: str):\n \"\"\"\n Posts the given message as a reply to the parent message.\n \"\"\"\n response = requests.post(\n 'https://slack.com/api/chat.postMessage',\n headers={\n 'Authorization': f\"Bearer {slack_token}\",\n 'Content-Type': 'application/json; charset=utf-8',\n },\n json={\n 'channel': slack_channel,\n 'text': message,\n 'thread_ts': ts,\n },\n )\n if response.status_code != 200:\n # XXX : Check \"ok\" field for errors\n # XXX : Log this\n print(\n f'Failed to POST slack message\\n Status code: {response.status_code}\\n Message: {message}'\n )\n # XXX : Retry logic?\n\n for i in issues:\n # Slack rate limit is roughly 1 request per second\n time.sleep(1)\n\n comment_url = i['comment_url']\n issue_title = i['issue_title']\n commenter = i['commenter']\n message = f'<{comment_url}|Latest comment for: *{issue_title}*>\\n'\n\n username = lead_label_to_username.get(i['lead_label'], '')\n slack_id = username_to_slack_id.get(username, '')\n if slack_id:\n message += f'Lead: {slack_id}\\n'\n elif i['lead_label']:\n message += f'{i[\"lead_label\"]}\\n'\n else:\n message += 'Lead: N/A\\n'\n\n message += f'Commenter: *{commenter}*'\n comment_on_thread(message)\n\n\ndef time_since(hours):\n \"\"\"Returns datetime and string representations of the current time, minus the given hour\"\"\"\n now = datetime.now()\n # XXX : Add a minute or two to the delta (to avoid dropping issues)?\n since = now - timedelta(hours=hours)\n return since, since.strftime('%Y-%m-%dT%H:%M:%S')\n\n\ndef start_job(args: argparse.Namespace):\n \"\"\"\n Starts the new comment digest job.\n \"\"\"\n since, date_string = time_since(args.hours)\n issues = fetch_issues(date_string)\n\n filtered_issues = filter_issues(issues, since)\n publish_digest(filtered_issues, args.channel, args.slack_token, args.hours)\n print('Digest posted to Slack.')\n\n\ndef _get_parser() -> argparse.ArgumentParser:\n \"\"\"\n Creates and returns an ArgumentParser containing default values which were\n read from the config file.\n \"\"\"\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\n 'hours',\n help='Fetch issues that have been updated since this many hours ago',\n type=int,\n )\n parser.add_argument(\n 'channel',\n help=\"Issues will be published to this Slack channel\",\n type=str,\n )\n parser.add_argument(\n 'slack_token',\n metavar='slack-token',\n help='Slack auth token',\n type=str,\n )\n\n return parser\n\n\nif __name__ == '__main__':\n # Process command-line arguments and starts the notification job\n parser = _get_parser()\n args = parser.parse_args()\n\n # If found, add token to GitHub request headers:\n github_token = os.environ.get('GITHUB_TOKEN', '')\n if github_token:\n github_headers['Authorization'] = f'Bearer {github_token}'\n start_job(args)\n", "path": "scripts/gh_scripts/issue_comment_bot.py"}]}
| 3,848 | 248 |
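The accepted patch above closes the loop by labelling every issue that lands in the digest through GitHub's REST labels endpoint. A standalone sketch of that call follows; it assumes a token with issue-write access in `GITHUB_TOKEN`, and it takes issue numbers explicitly, since the dicts built by `filter_issues()` in the listing above do not carry a `number` field.

```python
# Sketch of the labelling step from the patch above, written as a helper that
# takes issue numbers directly. GITHUB_TOKEN is assumed to be set.
import os

import requests

GITHUB_HEADERS = {
    "Accept": "application/vnd.github+json",
    "Authorization": f"Bearer {os.environ.get('GITHUB_TOKEN', '')}",
    "X-GitHub-Api-Version": "2022-11-28",
}


def add_label_to_issues(issue_numbers, label="Needs: Response"):
    """POST the given label onto each issue in internetarchive/openlibrary."""
    for number in issue_numbers:
        url = (
            "https://api.github.com/repos/internetarchive/openlibrary/"
            f"issues/{number}/labels"
        )
        response = requests.post(url, json={"labels": [label]}, headers=GITHUB_HEADERS)
        response.raise_for_status()  # fail loudly instead of silently dropping errors
```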
gh_patches_debug_14522
|
rasdani/github-patches
|
git_diff
|
cisagov__manage.get.gov-1302
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to access Manage Domain for a transition domain
### Current Behavior
Logged in as a full access user I manually created a domain that just had domain and domain info, but no application. When going to its Manage Domain page via the django admin domain object I got a 500 error and the logs showed an Exception was thrown. This was to simulate a transition domain and looks like this bug would affect analysts too.
### Expected Behavior
Expected as I am full access to not see a 500 error going to the manage domain page. Nothing should have crashed.
### Steps to Reproduce
1. Log in as full access or admin user
2. Create a domain with a domain information object
3. click the manage domain button on the Domain object
4. see a 500 error
### Environment
_No response_
### Additional Context

This line "if requested_domain.domain_application.status not in valid_domain_statuses" in the error message above is the cause of the problem. it should not be checking domain application and needs to check domain information instead.
### Issue Links
_No response_
</issue>
<code>
[start of src/registrar/views/utility/mixins.py]
1 """Permissions-related mixin classes."""
2
3 from django.contrib.auth.mixins import PermissionRequiredMixin
4
5 from registrar.models import (
6 DomainApplication,
7 DomainInvitation,
8 DomainInformation,
9 UserDomainRole,
10 )
11 import logging
12
13
14 logger = logging.getLogger(__name__)
15
16
17 class PermissionsLoginMixin(PermissionRequiredMixin):
18
19 """Mixin that redirects to login page if not logged in, otherwise 403."""
20
21 def handle_no_permission(self):
22 self.raise_exception = self.request.user.is_authenticated
23 return super().handle_no_permission()
24
25
26 class DomainPermission(PermissionsLoginMixin):
27
28 """Does the logged-in user have access to this domain?"""
29
30 def has_permission(self):
31 """Check if this user has access to this domain.
32
33 The user is in self.request.user and the domain needs to be looked
34 up from the domain's primary key in self.kwargs["pk"]
35 """
36
37 if not self.request.user.is_authenticated:
38 return False
39
40 if self.request.user.is_restricted():
41 return False
42
43 pk = self.kwargs["pk"]
44 # If pk is none then something went very wrong...
45 if pk is None:
46 raise ValueError("Primary key is None")
47
48 if self.can_access_other_user_domains(pk):
49 return True
50
51 # user needs to have a role on the domain
52 if not UserDomainRole.objects.filter(
53 user=self.request.user, domain__id=pk
54 ).exists():
55 return False
56
57 # if we need to check more about the nature of role, do it here.
58 return True
59
60 def can_access_other_user_domains(self, pk):
61 """Checks to see if an authorized user (staff or superuser)
62 can access a domain that they did not create or was invited to.
63 """
64
65 # Check if the user is permissioned...
66 user_is_analyst_or_superuser = self.request.user.has_perm(
67 "registrar.analyst_access_permission"
68 ) or self.request.user.has_perm("registrar.full_access_permission")
69
70 if not user_is_analyst_or_superuser:
71 return False
72
73 # Check if the user is attempting a valid edit action.
74 # In other words, if the analyst/admin did not click
75 # the 'Manage Domain' button in /admin,
76 # then they cannot access this page.
77 session = self.request.session
78 can_do_action = (
79 "analyst_action" in session
80 and "analyst_action_location" in session
81 and session["analyst_action_location"] == pk
82 )
83
84 if not can_do_action:
85 return False
86
87 # Analysts may manage domains, when they are in these statuses:
88 valid_domain_statuses = [
89 DomainApplication.APPROVED,
90 DomainApplication.IN_REVIEW,
91 DomainApplication.REJECTED,
92 DomainApplication.ACTION_NEEDED,
93 # Edge case - some domains do not have
94 # a status or DomainInformation... aka a status of 'None'.
95 # It is necessary to access those to correct errors.
96 None,
97 ]
98
99 requested_domain = None
100 if DomainInformation.objects.filter(id=pk).exists():
101 requested_domain = DomainInformation.objects.get(id=pk)
102
103 if requested_domain.domain_application.status not in valid_domain_statuses:
104 return False
105
106 # Valid session keys exist,
107 # the user is permissioned,
108 # and it is in a valid status
109 return True
110
111
112 class DomainApplicationPermission(PermissionsLoginMixin):
113
114 """Does the logged-in user have access to this domain application?"""
115
116 def has_permission(self):
117 """Check if this user has access to this domain application.
118
119 The user is in self.request.user and the domain needs to be looked
120 up from the domain's primary key in self.kwargs["pk"]
121 """
122 if not self.request.user.is_authenticated:
123 return False
124
125 # user needs to be the creator of the application
126 # this query is empty if there isn't a domain application with this
127 # id and this user as creator
128 if not DomainApplication.objects.filter(
129 creator=self.request.user, id=self.kwargs["pk"]
130 ).exists():
131 return False
132
133 return True
134
135
136 class ApplicationWizardPermission(PermissionsLoginMixin):
137
138 """Does the logged-in user have permission to start or edit an application?"""
139
140 def has_permission(self):
141 """Check if this user has permission to start or edit an application.
142
143 The user is in self.request.user
144 """
145
146 # The user has an ineligible flag
147 if self.request.user.is_restricted():
148 return False
149
150 return True
151
152
153 class DomainInvitationPermission(PermissionsLoginMixin):
154
155 """Does the logged-in user have access to this domain invitation?
156
157 A user has access to a domain invitation if they have a role on the
158 associated domain.
159 """
160
161 def has_permission(self):
162 """Check if this user has a role on the domain of this invitation."""
163 if not self.request.user.is_authenticated:
164 return False
165
166 if not DomainInvitation.objects.filter(
167 id=self.kwargs["pk"], domain__permissions__user=self.request.user
168 ).exists():
169 return False
170
171 return True
172
[end of src/registrar/views/utility/mixins.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/registrar/views/utility/mixins.py b/src/registrar/views/utility/mixins.py
--- a/src/registrar/views/utility/mixins.py
+++ b/src/registrar/views/utility/mixins.py
@@ -100,7 +100,15 @@
if DomainInformation.objects.filter(id=pk).exists():
requested_domain = DomainInformation.objects.get(id=pk)
- if requested_domain.domain_application.status not in valid_domain_statuses:
+ # if no domain information or application exist, the user
+ # should be able to manage the domain; however, if domain information
+ # and domain application exist, and application is not in valid status,
+ # user should not be able to manage domain
+ if (
+ requested_domain
+ and requested_domain.domain_application
+ and requested_domain.domain_application.status not in valid_domain_statuses
+ ):
return False
# Valid session keys exist,
|
{"golden_diff": "diff --git a/src/registrar/views/utility/mixins.py b/src/registrar/views/utility/mixins.py\n--- a/src/registrar/views/utility/mixins.py\n+++ b/src/registrar/views/utility/mixins.py\n@@ -100,7 +100,15 @@\n if DomainInformation.objects.filter(id=pk).exists():\n requested_domain = DomainInformation.objects.get(id=pk)\n \n- if requested_domain.domain_application.status not in valid_domain_statuses:\n+ # if no domain information or application exist, the user\n+ # should be able to manage the domain; however, if domain information\n+ # and domain application exist, and application is not in valid status,\n+ # user should not be able to manage domain\n+ if (\n+ requested_domain\n+ and requested_domain.domain_application\n+ and requested_domain.domain_application.status not in valid_domain_statuses\n+ ):\n return False\n \n # Valid session keys exist,\n", "issue": "Unable to access Manage Domain for a transition domain\n### Current Behavior\n\nLogged in as a full access user I manually created a domain that just had domain and domain info, but no application. When going to its Manage Domain page via the django admin domain object I got a 500 error and the logs showed an Exception was thrown. This was to simulate a transition domain and looks like this bug would affect analysts too. \n\n### Expected Behavior\n\nExpected as I am full access to not see a 500 error going to the manage domain page. Nothing should have crashed.\n\n### Steps to Reproduce\n\n1. Log in as full access or admin user\r\n2. Create a domain with a domain information object \r\n3. click the manage domain button on the Domain object\r\n4. see a 500 error\n\n### Environment\n\n_No response_\n\n### Additional Context\n\n\r\nThis line \"if requested_domain.domain_application.status not in valid_domain_statuses\" in the error message above is the cause of the problem. it should not be checking domain application and needs to check domain information instead. 
\n\n### Issue Links\n\n_No response_\n", "before_files": [{"content": "\"\"\"Permissions-related mixin classes.\"\"\"\n\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\n\nfrom registrar.models import (\n DomainApplication,\n DomainInvitation,\n DomainInformation,\n UserDomainRole,\n)\nimport logging\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass PermissionsLoginMixin(PermissionRequiredMixin):\n\n \"\"\"Mixin that redirects to login page if not logged in, otherwise 403.\"\"\"\n\n def handle_no_permission(self):\n self.raise_exception = self.request.user.is_authenticated\n return super().handle_no_permission()\n\n\nclass DomainPermission(PermissionsLoginMixin):\n\n \"\"\"Does the logged-in user have access to this domain?\"\"\"\n\n def has_permission(self):\n \"\"\"Check if this user has access to this domain.\n\n The user is in self.request.user and the domain needs to be looked\n up from the domain's primary key in self.kwargs[\"pk\"]\n \"\"\"\n\n if not self.request.user.is_authenticated:\n return False\n\n if self.request.user.is_restricted():\n return False\n\n pk = self.kwargs[\"pk\"]\n # If pk is none then something went very wrong...\n if pk is None:\n raise ValueError(\"Primary key is None\")\n\n if self.can_access_other_user_domains(pk):\n return True\n\n # user needs to have a role on the domain\n if not UserDomainRole.objects.filter(\n user=self.request.user, domain__id=pk\n ).exists():\n return False\n\n # if we need to check more about the nature of role, do it here.\n return True\n\n def can_access_other_user_domains(self, pk):\n \"\"\"Checks to see if an authorized user (staff or superuser)\n can access a domain that they did not create or was invited to.\n \"\"\"\n\n # Check if the user is permissioned...\n user_is_analyst_or_superuser = self.request.user.has_perm(\n \"registrar.analyst_access_permission\"\n ) or self.request.user.has_perm(\"registrar.full_access_permission\")\n\n if not user_is_analyst_or_superuser:\n return False\n\n # Check if the user is attempting a valid edit action.\n # In other words, if the analyst/admin did not click\n # the 'Manage Domain' button in /admin,\n # then they cannot access this page.\n session = self.request.session\n can_do_action = (\n \"analyst_action\" in session\n and \"analyst_action_location\" in session\n and session[\"analyst_action_location\"] == pk\n )\n\n if not can_do_action:\n return False\n\n # Analysts may manage domains, when they are in these statuses:\n valid_domain_statuses = [\n DomainApplication.APPROVED,\n DomainApplication.IN_REVIEW,\n DomainApplication.REJECTED,\n DomainApplication.ACTION_NEEDED,\n # Edge case - some domains do not have\n # a status or DomainInformation... 
aka a status of 'None'.\n # It is necessary to access those to correct errors.\n None,\n ]\n\n requested_domain = None\n if DomainInformation.objects.filter(id=pk).exists():\n requested_domain = DomainInformation.objects.get(id=pk)\n\n if requested_domain.domain_application.status not in valid_domain_statuses:\n return False\n\n # Valid session keys exist,\n # the user is permissioned,\n # and it is in a valid status\n return True\n\n\nclass DomainApplicationPermission(PermissionsLoginMixin):\n\n \"\"\"Does the logged-in user have access to this domain application?\"\"\"\n\n def has_permission(self):\n \"\"\"Check if this user has access to this domain application.\n\n The user is in self.request.user and the domain needs to be looked\n up from the domain's primary key in self.kwargs[\"pk\"]\n \"\"\"\n if not self.request.user.is_authenticated:\n return False\n\n # user needs to be the creator of the application\n # this query is empty if there isn't a domain application with this\n # id and this user as creator\n if not DomainApplication.objects.filter(\n creator=self.request.user, id=self.kwargs[\"pk\"]\n ).exists():\n return False\n\n return True\n\n\nclass ApplicationWizardPermission(PermissionsLoginMixin):\n\n \"\"\"Does the logged-in user have permission to start or edit an application?\"\"\"\n\n def has_permission(self):\n \"\"\"Check if this user has permission to start or edit an application.\n\n The user is in self.request.user\n \"\"\"\n\n # The user has an ineligible flag\n if self.request.user.is_restricted():\n return False\n\n return True\n\n\nclass DomainInvitationPermission(PermissionsLoginMixin):\n\n \"\"\"Does the logged-in user have access to this domain invitation?\n\n A user has access to a domain invitation if they have a role on the\n associated domain.\n \"\"\"\n\n def has_permission(self):\n \"\"\"Check if this user has a role on the domain of this invitation.\"\"\"\n if not self.request.user.is_authenticated:\n return False\n\n if not DomainInvitation.objects.filter(\n id=self.kwargs[\"pk\"], domain__permissions__user=self.request.user\n ).exists():\n return False\n\n return True\n", "path": "src/registrar/views/utility/mixins.py"}]}
| 2,337 | 204 |
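The fix above works because it stops assuming that every `DomainInformation` row has a linked application; for manually created transition domains both links can be missing. The same guard can be read as a small standalone predicate — a condensed sketch, not the mixin itself:

```python
# Condensed sketch of the None-safe check introduced by the patch above.
def application_status_allows_managing(requested_domain, valid_domain_statuses):
    # requested_domain may be None (no DomainInformation row at all), and
    # requested_domain.domain_application may be None (transition domain).
    if (
        requested_domain
        and requested_domain.domain_application
        and requested_domain.domain_application.status not in valid_domain_statuses
    ):
        return False
    return True
```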
gh_patches_debug_8784
|
rasdani/github-patches
|
git_diff
|
digitalfabrik__integreat-cms-632
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
get_last_root_page template tag not working
### Describe the Bug
<!-- A clear and concise description of what the bug is. -->
The `get_last_root_page` template tag is not working:
https://github.com/Integreat/integreat-cms/blob/develop/src/cms/templatetags/page_filters.py#L10-L20
This is because in the page QuerySet aggregation, the difference() function is used:
https://github.com/Integreat/integreat-cms/blob/a285cb3c74b2a2c501147076338e2b2a70c89bd6/src/cms/models/regions/region.py#L177
After difference(), it is not possible to use filter(), see https://docs.djangoproject.com/en/2.2/ref/models/querysets/#union
This leads to the last drop-region in the page tree being incorrectly tied to the last page in the list, not necessarily the last root page.
### Steps to Reproduce
1. Go to Page tree
2. Drag & drop page to most bottom line
### Expected Behavior
<!-- A clear and concise description of what you expected to happen. -->
The page should be placed next to the last root page
### Actual Behavior
<!-- A clear and concise description of what actually happened. -->
The page is placed next to the last page in the list
### Additional Information
<!-- Add any other context (e.g. logs, screenshots, etc.) about the problem here. -->
</issue>
<code>
[start of src/cms/templatetags/page_filters.py]
1 """
2 This is a collection of tags and filters for :class:`~cms.models.pages.page.Page` objects.
3 """
4 from django import template
5
6 register = template.Library()
7
8
9 @register.simple_tag
10 def get_last_root_page(pages):
11 """
12 This tag returns the last page on the root level.
13
14 :param pages: The requested page tree
15 :type pages: ~mptt.querysets.TreeQuerySet [ ~cms.models.pages.page.Page ]
16
17 :return: The last root page of the given :class:`~django.db.models.query.QuerySet`
18 :rtype: ~cms.models.pages.page.Page
19 """
20 return pages.filter(parent=None).last()
21
[end of src/cms/templatetags/page_filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/cms/templatetags/page_filters.py b/src/cms/templatetags/page_filters.py
--- a/src/cms/templatetags/page_filters.py
+++ b/src/cms/templatetags/page_filters.py
@@ -12,9 +12,9 @@
This tag returns the last page on the root level.
:param pages: The requested page tree
- :type pages: ~mptt.querysets.TreeQuerySet [ ~cms.models.pages.page.Page ]
+ :type pages: list [ ~cms.models.pages.page.Page ]
- :return: The last root page of the given :class:`~django.db.models.query.QuerySet`
+ :return: The last root page of the given page list
:rtype: ~cms.models.pages.page.Page
"""
- return pages.filter(parent=None).last()
+ return list(filter(lambda p: not p.parent, pages))[-1]
|
{"golden_diff": "diff --git a/src/cms/templatetags/page_filters.py b/src/cms/templatetags/page_filters.py\n--- a/src/cms/templatetags/page_filters.py\n+++ b/src/cms/templatetags/page_filters.py\n@@ -12,9 +12,9 @@\n This tag returns the last page on the root level.\n \n :param pages: The requested page tree\n- :type pages: ~mptt.querysets.TreeQuerySet [ ~cms.models.pages.page.Page ]\n+ :type pages: list [ ~cms.models.pages.page.Page ]\n \n- :return: The last root page of the given :class:`~django.db.models.query.QuerySet`\n+ :return: The last root page of the given page list\n :rtype: ~cms.models.pages.page.Page\n \"\"\"\n- return pages.filter(parent=None).last()\n+ return list(filter(lambda p: not p.parent, pages))[-1]\n", "issue": "get_last_root_page template tag not working\n### Describe the Bug\r\n<!-- A clear and concise description of what the bug is. -->\r\nThe `get_last_root_page` template tag is not working:\r\nhttps://github.com/Integreat/integreat-cms/blob/develop/src/cms/templatetags/page_filters.py#L10-L20\r\n\r\nThis is because in the page QuerySet aggregation, the difference() function is used:\r\nhttps://github.com/Integreat/integreat-cms/blob/a285cb3c74b2a2c501147076338e2b2a70c89bd6/src/cms/models/regions/region.py#L177\r\n\r\nAfter difference(), it is not possible to use filter(), see https://docs.djangoproject.com/en/2.2/ref/models/querysets/#union\r\n\r\nThis leads to the last drop-region in the page tree being incorrectly tied to the last page in the list, not necessarily the last root page.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Go to Page tree\r\n2. Drag & drop page to most bottom line\r\n\r\n### Expected Behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nThe page should be placed next to the last root page\r\n\r\n### Actual Behavior\r\n<!-- A clear and concise description of what actually happened. -->\r\nThe page is placed next to the last page in the list\r\n\r\n### Additional Information\r\n<!-- Add any other context (e.g. logs, screenshots, etc.) about the problem here. -->\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nThis is a collection of tags and filters for :class:`~cms.models.pages.page.Page` objects.\n\"\"\"\nfrom django import template\n\nregister = template.Library()\n\n\[email protected]_tag\ndef get_last_root_page(pages):\n \"\"\"\n This tag returns the last page on the root level.\n\n :param pages: The requested page tree\n :type pages: ~mptt.querysets.TreeQuerySet [ ~cms.models.pages.page.Page ]\n\n :return: The last root page of the given :class:`~django.db.models.query.QuerySet`\n :rtype: ~cms.models.pages.page.Page\n \"\"\"\n return pages.filter(parent=None).last()\n", "path": "src/cms/templatetags/page_filters.py"}]}
| 1,041 | 203 |
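The underlying limitation is that a queryset produced by `union()`, `intersection()` or `difference()` supports only a handful of further operations, and `filter()` is not among them — depending on the Django version this either raises `NotSupportedError` or quietly misbehaves, which is what the issue describes. The accepted fix therefore filters in Python. A sketch of the failing call next to the working one, assuming `pages` came from a `difference()` call as in `Region.get_pages()`:

```python
# `pages` is assumed to be a queryset built with .difference(), as in the issue.

# Not supported after difference()/union()/intersection():
# pages.filter(parent=None).last()

# Plain-Python equivalent used by the accepted fix:
root_pages = [page for page in pages if not page.parent]
last_root_page = root_pages[-1] if root_pages else None
```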
gh_patches_debug_15979
|
rasdani/github-patches
|
git_diff
|
spyder-ide__spyder-11239
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError in dockwidgets tabbar
## Description
### What steps will reproduce the problem?
<!--- You can use Markdown here --->
i dont know, im beginner in this program, so im watching a tutorial
### Traceback
```python-traceback
File "/Users/luismireles/opt/anaconda3/envs/K37/lib/python3.7/site-packages/spyder/widgets/dock.py", line 43, in eventFilter
self.tab_pressed(event)
File "/Users/luismireles/opt/anaconda3/envs/K37/lib/python3.7/site-packages/spyder/widgets/dock.py", line 52, in tab_pressed
if event.button() == Qt.RightButton:
AttributeError: 'QContextMenuEvent' object has no attribute 'button'
```
## Versions
* Spyder version: 4.0.0
* Python version: 3.7.0
* Qt version: 5.9.6
* PyQt5 version: 5.9.2
* Operating System: Darwin 19.2.0
### Dependencies
```
cloudpickle >=0.5.0 : 1.2.2 (OK)
pygments >=2.0 : 2.5.2 (OK)
qtconsole >=4.6.0 : 4.6.0 (OK)
nbconvert >=4.0 : 5.6.1 (OK)
sphinx >=0.6.6 : 2.3.0 (OK)
pylint >=0.25 : 2.4.4 (OK)
psutil >=0.3 : 5.6.7 (OK)
qtawesome >=0.5.7 : 0.6.0 (OK)
qtpy >=1.5.0 : 1.9.0 (OK)
pickleshare >=0.4 : 0.7.5 (OK)
zmq >=17 : 18.1.0 (OK)
chardet >=2.0.0 : 3.0.4 (OK)
numpydoc >=0.6.0 : 0.9.1 (OK)
spyder_kernels >=1.8.1;<2.0.0: 1.8.1 (OK)
qdarkstyle >=2.7 : 2.7 (OK)
atomicwrites >=1.2.0 : 1.3.0 (OK)
diff_match_patch >=20181111 : 20181111 (OK)
intervaltree : None (OK)
watchdog : None (OK)
keyring : None (OK)
pexpect >=4.4.0 : 4.7.0 (OK)
pympler : None (OK)
sympy >=0.7.3 : None (NOK)
cython >=0.21 : 0.29.14 (OK)
IPython >=4.0 : 7.10.2 (OK)
matplotlib >=2.0.0 : 3.1.1 (OK)
pandas >=0.13.1 : 0.25.3 (OK)
numpy >=1.7 : 1.17.4 (OK)
scipy >=0.17.0 : 1.3.1 (OK)
pyls >=0.31.2;<0.32.0 : 0.31.2 (OK)
applaunchservices >=0.1.7 : 0.2.1 (OK)
rtree >=0.8.3 : 0.8.3 (OK)
```
</issue>
<code>
[start of spyder/widgets/dock.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright © Spyder Project Contributors
4 # Licensed under the terms of the MIT License
5 # (see spyder/__init__.py for details)
6
7 """
8 Dock widgets for plugins
9 """
10
11 from qtpy.QtCore import QEvent, QObject, Qt, QSize, Signal
12 from qtpy.QtWidgets import (QApplication, QDockWidget, QHBoxLayout,
13 QSizePolicy, QStyle, QTabBar, QToolButton,
14 QWidget)
15
16 from spyder.config.gui import is_dark_interface
17 from spyder.utils import icon_manager as ima
18
19
20 # =============================================================================
21 # Tab filter
22 # =============================================================================
23 class TabFilter(QObject):
24 """Filter event attached to each DockWidget QTabBar."""
25 def __init__(self, dock_tabbar, main):
26 QObject.__init__(self)
27 self.dock_tabbar = dock_tabbar
28 self.main = main
29 self.from_index = None
30
31 # Center dockwidget tabs to differentiate them from plugin tabs.
32 # See spyder-ide/spyder#9763
33 self.dock_tabbar.setStyleSheet("QTabBar {alignment: center;}")
34
35 def eventFilter(self, obj, event):
36 """Filter mouse press events.
37
38 Events that are captured and not propagated return True. Events that
39 are not captured and are propagated return False.
40 """
41 event_type = event.type()
42 if event_type == QEvent.MouseButtonPress:
43 self.tab_pressed(event)
44 return False
45 return False
46
47 def tab_pressed(self, event):
48 """Method called when a tab from a QTabBar has been pressed."""
49 self.from_index = self.dock_tabbar.tabAt(event.pos())
50 self.dock_tabbar.setCurrentIndex(self.from_index)
51
52 if event.button() == Qt.RightButton:
53 if self.from_index == -1:
54 self.show_nontab_menu(event)
55 else:
56 self.show_tab_menu(event)
57
58 def show_tab_menu(self, event):
59 """Show the context menu assigned to tabs."""
60 self.show_nontab_menu(event)
61
62 def show_nontab_menu(self, event):
63 """Show the context menu assigned to nontabs section."""
64 menu = self.main.createPopupMenu()
65 menu.exec_(self.dock_tabbar.mapToGlobal(event.pos()))
66
67
68 # =============================================================================
69 # Title bar
70 # =============================================================================
71 class DragButton(QToolButton):
72 """
73 Drag button for the title bar.
74
75 This button pass all its mouse events to its parent.
76 """
77
78 def __init__(self, parent, button_size):
79 super(QToolButton, self).__init__(parent)
80 self.parent = parent
81
82 # Style
83 self.setMaximumSize(button_size)
84 self.setAutoRaise(True)
85 self.setIcon(ima.icon('drag-horizontal'))
86 if is_dark_interface():
87 self.setStyleSheet(
88 "QToolButton {"
89 "border-radius: 0px;"
90 "border: 0px;"
91 "background-color: #32414B;}")
92 else:
93 self.setStyleSheet("QToolButton {border: 0px;}")
94
95 def mouseReleaseEvent(self, event):
96 self.parent.mouseReleaseEvent(event)
97
98 def mousePressEvent(self, event):
99 self.parent.mousePressEvent(event)
100
101 def mouseMoveEvent(self, event):
102 self.parent.mouseMoveEvent(event)
103
104
105 class CloseButton(QToolButton):
106 """Close button for the title bar."""
107
108 def __init__(self, parent, button_size):
109 super(QToolButton, self).__init__(parent)
110
111 # Style
112 self.setMaximumSize(button_size)
113 self.setAutoRaise(True)
114 self.setCursor(Qt.ArrowCursor)
115 if is_dark_interface():
116 self.setStyleSheet(
117 "QToolButton {"
118 "border-radius: 0px;"
119 "border: 0px;"
120 "image: url(:/qss_icons/rc/close.png);"
121 "background-color: #32414B;}"
122 "QToolButton:hover {"
123 "image: url(:/qss_icons/rc/close-hover.png);}")
124 else:
125 self.setIcon(QApplication.style().standardIcon(
126 QStyle.SP_DockWidgetCloseButton))
127
128
129 class DockTitleBar(QWidget):
130 """
131 Custom title bar for our dock widgets.
132
133 Inspired from
134 https://stackoverflow.com/a/40894225/438386
135 """
136
137 def __init__(self, parent):
138 super(DockTitleBar, self).__init__(parent)
139
140 icon_size = QApplication.style().standardIcon(
141 QStyle.SP_TitleBarNormalButton).actualSize(QSize(100, 100))
142 button_size = icon_size + QSize(8, 8)
143
144 left_spacer = QWidget(self)
145 left_spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Minimum)
146 if is_dark_interface():
147 left_spacer.setStyleSheet("background-color: #32414B")
148
149 drag_button = DragButton(self, button_size)
150
151 right_spacer = QWidget(self)
152 right_spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Minimum)
153 if is_dark_interface():
154 right_spacer.setStyleSheet("background-color: #32414B")
155
156 close_button = CloseButton(self, button_size)
157 close_button.clicked.connect(parent.sig_plugin_closed.emit)
158
159 hlayout = QHBoxLayout(self)
160 hlayout.setSpacing(0)
161 hlayout.setContentsMargins(0, 0, 0, 0)
162 hlayout.addWidget(left_spacer)
163 hlayout.addWidget(drag_button)
164 hlayout.addWidget(right_spacer)
165 hlayout.addWidget(close_button)
166
167 # To signal that dock widgets can be dragged from here
168 self.setCursor(Qt.SizeAllCursor)
169
170 def mouseReleaseEvent(self, event):
171 self.setCursor(Qt.SizeAllCursor)
172 QWidget.mouseReleaseEvent(self, event)
173
174 def mousePressEvent(self, event):
175 self.setCursor(Qt.ClosedHandCursor)
176 QWidget.mousePressEvent(self, event)
177
178 def mouseMoveEvent(self, event):
179 QWidget.mouseMoveEvent(self, event)
180 self.setCursor(Qt.SizeAllCursor)
181
182
183 class SpyderDockWidget(QDockWidget):
184 """Subclass to override needed methods"""
185 sig_plugin_closed = Signal()
186
187 def __init__(self, title, parent):
188 super(SpyderDockWidget, self).__init__(title, parent)
189
190 # Set our custom title bar
191 self.titlebar = DockTitleBar(self)
192 self.set_title_bar()
193
194 # Needed for the installation of the event filter
195 self.title = title
196 self.main = parent
197 self.dock_tabbar = None
198
199 # To track dockwidget changes the filter is installed when dockwidget
200 # visibility changes. This installs the filter on startup and also
201 # on dockwidgets that are undocked and then docked to a new location.
202 self.visibilityChanged.connect(self.install_tab_event_filter)
203
204 def closeEvent(self, event):
205 """
206 Reimplement Qt method to send a signal on close so that "Panes" main
207 window menu can be updated correctly
208 """
209 self.sig_plugin_closed.emit()
210
211 def install_tab_event_filter(self, value):
212 """
213 Install an event filter to capture mouse events in the tabs of a
214 QTabBar holding tabified dockwidgets.
215 """
216 dock_tabbar = None
217 tabbars = self.main.findChildren(QTabBar)
218 for tabbar in tabbars:
219 for tab in range(tabbar.count()):
220 title = tabbar.tabText(tab)
221 if title == self.title:
222 dock_tabbar = tabbar
223 break
224
225 if dock_tabbar is not None:
226 self.dock_tabbar = dock_tabbar
227 # Install filter only once per QTabBar
228 if getattr(self.dock_tabbar, 'filter', None) is None:
229 self.dock_tabbar.filter = TabFilter(self.dock_tabbar,
230 self.main)
231 self.dock_tabbar.installEventFilter(self.dock_tabbar.filter)
232
233 def set_title_bar(self):
234 """Set custom title bar."""
235 self.setTitleBarWidget(self.titlebar)
236
[end of spyder/widgets/dock.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/spyder/widgets/dock.py b/spyder/widgets/dock.py
--- a/spyder/widgets/dock.py
+++ b/spyder/widgets/dock.py
@@ -49,11 +49,17 @@
self.from_index = self.dock_tabbar.tabAt(event.pos())
self.dock_tabbar.setCurrentIndex(self.from_index)
- if event.button() == Qt.RightButton:
- if self.from_index == -1:
- self.show_nontab_menu(event)
- else:
- self.show_tab_menu(event)
+ try:
+ if event.button() == Qt.RightButton:
+ if self.from_index == -1:
+ self.show_nontab_menu(event)
+ else:
+ self.show_tab_menu(event)
+ except AttributeError:
+ # Needed to avoid an error when generating the
+ # context menu on top of the tab.
+ # See spyder-ide/spyder#11226
+ pass
def show_tab_menu(self, event):
"""Show the context menu assigned to tabs."""
|
{"golden_diff": "diff --git a/spyder/widgets/dock.py b/spyder/widgets/dock.py\n--- a/spyder/widgets/dock.py\n+++ b/spyder/widgets/dock.py\n@@ -49,11 +49,17 @@\n self.from_index = self.dock_tabbar.tabAt(event.pos())\n self.dock_tabbar.setCurrentIndex(self.from_index)\n \n- if event.button() == Qt.RightButton:\n- if self.from_index == -1:\n- self.show_nontab_menu(event)\n- else:\n- self.show_tab_menu(event)\n+ try:\n+ if event.button() == Qt.RightButton:\n+ if self.from_index == -1:\n+ self.show_nontab_menu(event)\n+ else:\n+ self.show_tab_menu(event)\n+ except AttributeError:\n+ # Needed to avoid an error when generating the\n+ # context menu on top of the tab.\n+ # See spyder-ide/spyder#11226\n+ pass\n \n def show_tab_menu(self, event):\n \"\"\"Show the context menu assigned to tabs.\"\"\"\n", "issue": "AttributeError in dockwidgets tabbar\n## Description\n\n### What steps will reproduce the problem?\n\n<!--- You can use Markdown here --->\n\ni dont know, im beginner in this program, so im watching a tutorial\n\n### Traceback\n```python-traceback\n File \"/Users/luismireles/opt/anaconda3/envs/K37/lib/python3.7/site-packages/spyder/widgets/dock.py\", line 43, in eventFilter\n self.tab_pressed(event)\n File \"/Users/luismireles/opt/anaconda3/envs/K37/lib/python3.7/site-packages/spyder/widgets/dock.py\", line 52, in tab_pressed\n if event.button() == Qt.RightButton:\nAttributeError: 'QContextMenuEvent' object has no attribute 'button'\n```\n\n## Versions\n\n* Spyder version: 4.0.0 \n* Python version: 3.7.0\n* Qt version: 5.9.6\n* PyQt5 version: 5.9.2\n* Operating System: Darwin 19.2.0\n\n### Dependencies\n\n```\ncloudpickle >=0.5.0 : 1.2.2 (OK)\npygments >=2.0 : 2.5.2 (OK)\nqtconsole >=4.6.0 : 4.6.0 (OK)\nnbconvert >=4.0 : 5.6.1 (OK)\nsphinx >=0.6.6 : 2.3.0 (OK)\npylint >=0.25 : 2.4.4 (OK)\npsutil >=0.3 : 5.6.7 (OK)\nqtawesome >=0.5.7 : 0.6.0 (OK)\nqtpy >=1.5.0 : 1.9.0 (OK)\npickleshare >=0.4 : 0.7.5 (OK)\nzmq >=17 : 18.1.0 (OK)\nchardet >=2.0.0 : 3.0.4 (OK)\nnumpydoc >=0.6.0 : 0.9.1 (OK)\nspyder_kernels >=1.8.1;<2.0.0: 1.8.1 (OK)\nqdarkstyle >=2.7 : 2.7 (OK)\natomicwrites >=1.2.0 : 1.3.0 (OK)\ndiff_match_patch >=20181111 : 20181111 (OK)\nintervaltree : None (OK)\nwatchdog : None (OK)\nkeyring : None (OK)\npexpect >=4.4.0 : 4.7.0 (OK)\npympler : None (OK)\nsympy >=0.7.3 : None (NOK)\ncython >=0.21 : 0.29.14 (OK)\nIPython >=4.0 : 7.10.2 (OK)\nmatplotlib >=2.0.0 : 3.1.1 (OK)\npandas >=0.13.1 : 0.25.3 (OK)\nnumpy >=1.7 : 1.17.4 (OK)\nscipy >=0.17.0 : 1.3.1 (OK)\npyls >=0.31.2;<0.32.0 : 0.31.2 (OK)\napplaunchservices >=0.1.7 : 0.2.1 (OK)\nrtree >=0.8.3 : 0.8.3 (OK)\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"\nDock widgets for plugins\n\"\"\"\n\nfrom qtpy.QtCore import QEvent, QObject, Qt, QSize, Signal\nfrom qtpy.QtWidgets import (QApplication, QDockWidget, QHBoxLayout,\n QSizePolicy, QStyle, QTabBar, QToolButton,\n QWidget)\n\nfrom spyder.config.gui import is_dark_interface\nfrom spyder.utils import icon_manager as ima\n\n\n# =============================================================================\n# Tab filter\n# =============================================================================\nclass TabFilter(QObject):\n \"\"\"Filter event attached to each DockWidget QTabBar.\"\"\"\n def __init__(self, dock_tabbar, main):\n QObject.__init__(self)\n self.dock_tabbar = dock_tabbar\n self.main = main\n self.from_index = None\n\n 
# Center dockwidget tabs to differentiate them from plugin tabs.\n # See spyder-ide/spyder#9763\n self.dock_tabbar.setStyleSheet(\"QTabBar {alignment: center;}\")\n\n def eventFilter(self, obj, event):\n \"\"\"Filter mouse press events.\n\n Events that are captured and not propagated return True. Events that\n are not captured and are propagated return False.\n \"\"\"\n event_type = event.type()\n if event_type == QEvent.MouseButtonPress:\n self.tab_pressed(event)\n return False\n return False\n\n def tab_pressed(self, event):\n \"\"\"Method called when a tab from a QTabBar has been pressed.\"\"\"\n self.from_index = self.dock_tabbar.tabAt(event.pos())\n self.dock_tabbar.setCurrentIndex(self.from_index)\n\n if event.button() == Qt.RightButton:\n if self.from_index == -1:\n self.show_nontab_menu(event)\n else:\n self.show_tab_menu(event)\n\n def show_tab_menu(self, event):\n \"\"\"Show the context menu assigned to tabs.\"\"\"\n self.show_nontab_menu(event)\n\n def show_nontab_menu(self, event):\n \"\"\"Show the context menu assigned to nontabs section.\"\"\"\n menu = self.main.createPopupMenu()\n menu.exec_(self.dock_tabbar.mapToGlobal(event.pos()))\n\n\n# =============================================================================\n# Title bar\n# =============================================================================\nclass DragButton(QToolButton):\n \"\"\"\n Drag button for the title bar.\n\n This button pass all its mouse events to its parent.\n \"\"\"\n\n def __init__(self, parent, button_size):\n super(QToolButton, self).__init__(parent)\n self.parent = parent\n\n # Style\n self.setMaximumSize(button_size)\n self.setAutoRaise(True)\n self.setIcon(ima.icon('drag-horizontal'))\n if is_dark_interface():\n self.setStyleSheet(\n \"QToolButton {\"\n \"border-radius: 0px;\"\n \"border: 0px;\"\n \"background-color: #32414B;}\")\n else:\n self.setStyleSheet(\"QToolButton {border: 0px;}\")\n\n def mouseReleaseEvent(self, event):\n self.parent.mouseReleaseEvent(event)\n\n def mousePressEvent(self, event):\n self.parent.mousePressEvent(event)\n\n def mouseMoveEvent(self, event):\n self.parent.mouseMoveEvent(event)\n\n\nclass CloseButton(QToolButton):\n \"\"\"Close button for the title bar.\"\"\"\n\n def __init__(self, parent, button_size):\n super(QToolButton, self).__init__(parent)\n\n # Style\n self.setMaximumSize(button_size)\n self.setAutoRaise(True)\n self.setCursor(Qt.ArrowCursor)\n if is_dark_interface():\n self.setStyleSheet(\n \"QToolButton {\"\n \"border-radius: 0px;\"\n \"border: 0px;\"\n \"image: url(:/qss_icons/rc/close.png);\"\n \"background-color: #32414B;}\"\n \"QToolButton:hover {\"\n \"image: url(:/qss_icons/rc/close-hover.png);}\")\n else:\n self.setIcon(QApplication.style().standardIcon(\n QStyle.SP_DockWidgetCloseButton))\n\n\nclass DockTitleBar(QWidget):\n \"\"\"\n Custom title bar for our dock widgets.\n\n Inspired from\n https://stackoverflow.com/a/40894225/438386\n \"\"\"\n\n def __init__(self, parent):\n super(DockTitleBar, self).__init__(parent)\n\n icon_size = QApplication.style().standardIcon(\n QStyle.SP_TitleBarNormalButton).actualSize(QSize(100, 100))\n button_size = icon_size + QSize(8, 8)\n\n left_spacer = QWidget(self)\n left_spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Minimum)\n if is_dark_interface():\n left_spacer.setStyleSheet(\"background-color: #32414B\")\n\n drag_button = DragButton(self, button_size)\n\n right_spacer = QWidget(self)\n right_spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Minimum)\n if is_dark_interface():\n 
right_spacer.setStyleSheet(\"background-color: #32414B\")\n\n close_button = CloseButton(self, button_size)\n close_button.clicked.connect(parent.sig_plugin_closed.emit)\n\n hlayout = QHBoxLayout(self)\n hlayout.setSpacing(0)\n hlayout.setContentsMargins(0, 0, 0, 0)\n hlayout.addWidget(left_spacer)\n hlayout.addWidget(drag_button)\n hlayout.addWidget(right_spacer)\n hlayout.addWidget(close_button)\n\n # To signal that dock widgets can be dragged from here\n self.setCursor(Qt.SizeAllCursor)\n\n def mouseReleaseEvent(self, event):\n self.setCursor(Qt.SizeAllCursor)\n QWidget.mouseReleaseEvent(self, event)\n\n def mousePressEvent(self, event):\n self.setCursor(Qt.ClosedHandCursor)\n QWidget.mousePressEvent(self, event)\n\n def mouseMoveEvent(self, event):\n QWidget.mouseMoveEvent(self, event)\n self.setCursor(Qt.SizeAllCursor)\n\n\nclass SpyderDockWidget(QDockWidget):\n \"\"\"Subclass to override needed methods\"\"\"\n sig_plugin_closed = Signal()\n\n def __init__(self, title, parent):\n super(SpyderDockWidget, self).__init__(title, parent)\n\n # Set our custom title bar\n self.titlebar = DockTitleBar(self)\n self.set_title_bar()\n\n # Needed for the installation of the event filter\n self.title = title\n self.main = parent\n self.dock_tabbar = None\n\n # To track dockwidget changes the filter is installed when dockwidget\n # visibility changes. This installs the filter on startup and also\n # on dockwidgets that are undocked and then docked to a new location.\n self.visibilityChanged.connect(self.install_tab_event_filter)\n\n def closeEvent(self, event):\n \"\"\"\n Reimplement Qt method to send a signal on close so that \"Panes\" main\n window menu can be updated correctly\n \"\"\"\n self.sig_plugin_closed.emit()\n\n def install_tab_event_filter(self, value):\n \"\"\"\n Install an event filter to capture mouse events in the tabs of a\n QTabBar holding tabified dockwidgets.\n \"\"\"\n dock_tabbar = None\n tabbars = self.main.findChildren(QTabBar)\n for tabbar in tabbars:\n for tab in range(tabbar.count()):\n title = tabbar.tabText(tab)\n if title == self.title:\n dock_tabbar = tabbar\n break\n\n if dock_tabbar is not None:\n self.dock_tabbar = dock_tabbar\n # Install filter only once per QTabBar\n if getattr(self.dock_tabbar, 'filter', None) is None:\n self.dock_tabbar.filter = TabFilter(self.dock_tabbar,\n self.main)\n self.dock_tabbar.installEventFilter(self.dock_tabbar.filter)\n\n def set_title_bar(self):\n \"\"\"Set custom title bar.\"\"\"\n self.setTitleBarWidget(self.titlebar)\n", "path": "spyder/widgets/dock.py"}]}
| 3,718 | 241 |
gh_patches_debug_25293
|
rasdani/github-patches
|
git_diff
|
sopel-irc__sopel-1797
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ip: MaxMind GeoLite2 Changes
### Description
MaxMind appears to now require an account to download the GeoLite2 databases, which Sopel currently uses. The geolite.maxmind.com domain, where Sopel downloads the databases, no longer exists, which is causing tests to fail.
### Reproduction steps
1. Clone master
2. Run `./pytest_run.py -v sopel/modules/ip.py`
3. Observe `sopel/modules/ip.py::test_example_ip_0 FAILED`
### Expected behavior
Test passes
### Notes
There seems to be an API with a [python library](https://pypi.org/project/geoip2/), but that also requires an account.
</issue>
<code>
[start of sopel/modules/ip.py]
1 # coding=utf-8
2 """
3 ip.py - Sopel GeoIP Lookup Module
4 Copyright 2011, Dimitri Molenaars, TyRope.nl,
5 Copyright © 2013, Elad Alfassa <[email protected]>
6 Licensed under the Eiffel Forum License 2.
7
8 https://sopel.chat
9 """
10
11 from __future__ import unicode_literals, absolute_import, print_function, division
12
13 import logging
14 import os
15 import socket
16 import tarfile
17
18 import geoip2.database
19
20 from sopel.config.types import FilenameAttribute, StaticSection
21 from sopel.module import commands, example
22
23 urlretrieve = None
24 try:
25 from urllib import urlretrieve
26 except ImportError:
27 try:
28 # urlretrieve has been put under urllib.request in Python 3.
29 # It's also deprecated so this should probably be replaced with
30 # urllib2.
31 from urllib.request import urlretrieve
32 except ImportError:
33 pass
34
35
36 LOGGER = logging.getLogger(__name__)
37
38
39 class GeoipSection(StaticSection):
40 GeoIP_db_path = FilenameAttribute('GeoIP_db_path', directory=True)
41 """Path of the directory containing the GeoIP database files."""
42
43
44 def configure(config):
45 """
46 | name | example | purpose |
47 | ---- | ------- | ------- |
48 | GeoIP\\_db\\_path | /home/sopel/GeoIP/ | Path to the GeoIP database files |
49 """
50 config.define_section('ip', GeoipSection)
51 config.ip.configure_setting('GeoIP_db_path',
52 'Path of the GeoIP db files')
53
54
55 def setup(bot):
56 bot.config.define_section('ip', GeoipSection)
57
58
59 def _decompress(source, target, delete_after_decompression=True):
60 """Decompress just the database from the archive"""
61 # https://stackoverflow.com/a/16452962
62 tar = tarfile.open(source)
63 for member in tar.getmembers():
64 if ".mmdb" in member.name:
65 member.name = os.path.basename(member.name)
66 tar.extract(member, target)
67 if delete_after_decompression:
68 os.remove(source)
69
70
71 def _find_geoip_db(bot):
72 """Find the GeoIP database"""
73 config = bot.config
74 if config.ip.GeoIP_db_path:
75 cities_db = os.path.join(config.ip.GeoIP_db_path, 'GeoLite2-City.mmdb')
76 ipasnum_db = os.path.join(config.ip.GeoIP_db_path, 'GeoLite2-ASN.mmdb')
77 if (os.path.isfile(cities_db) and os.path.isfile(ipasnum_db)):
78 return config.ip.GeoIP_db_path
79 else:
80 LOGGER.warning(
81 'GeoIP path configured but DB not found in configured path')
82
83 if (os.path.isfile(os.path.join(config.core.homedir, 'GeoLite2-City.mmdb')) and
84 os.path.isfile(os.path.join(config.core.homedir, 'GeoLite2-ASN.mmdb'))):
85 return config.core.homedir
86 elif (os.path.isfile(os.path.join('/usr/share/GeoIP', 'GeoLite2-City.mmdb')) and
87 os.path.isfile(os.path.join('/usr/share/GeoIP', 'GeoLite2-ASN.mmdb'))):
88 return '/usr/share/GeoIP'
89 elif urlretrieve:
90 LOGGER.info('Downloading GeoIP database')
91 bot.say('Downloading GeoIP database, please wait...')
92 geolite_urls = [
93 'https://geolite.maxmind.com/download/geoip/database/GeoLite2-City.tar.gz',
94 'https://geolite.maxmind.com/download/geoip/database/GeoLite2-ASN.tar.gz'
95 ]
96 for url in geolite_urls:
97 LOGGER.debug('GeoIP Source URL: %s', url)
98 full_path = os.path.join(config.core.homedir, url.split("/")[-1])
99 urlretrieve(url, full_path)
100 _decompress(full_path, config.core.homedir)
101 return bot.config.core.homedir
102 else:
103 return False
104
105
106 @commands('iplookup', 'ip')
107 @example('.ip 8.8.8.8',
108 r'\[IP\/Host Lookup\] Hostname: \S*dns\S*\.google\S* \| Location: United States \| ISP: AS15169 Google LLC',
109 re=True,
110 ignore='Downloading GeoIP database, please wait...',
111 online=True)
112 def ip(bot, trigger):
113 """IP Lookup tool"""
114 # Check if there is input at all
115 if not trigger.group(2):
116 return bot.reply("No search term.")
117 # Check whether the input is an IP or hostmask or a nickname
118 decide = ['.', ':']
119 if any(x in trigger.group(2) for x in decide):
120 # It's an IP/hostname!
121 query = trigger.group(2).strip()
122 else:
123 # Need to get the host for the username
124 username = trigger.group(2).strip()
125 user_in_botdb = bot.users.get(username)
126 if user_in_botdb is not None:
127 query = user_in_botdb.host
128
129 # Sanity check - sometimes user information isn't populated yet
130 if query is None:
131 return bot.say("I don't know that user's host.")
132 else:
133 return bot.say("I\'m not aware of this user.")
134
135 db_path = _find_geoip_db(bot)
136 if db_path is False:
137 LOGGER.error('Can\'t find (or download) usable GeoIP database.')
138 bot.say('Sorry, I don\'t have a GeoIP database to use for this lookup.')
139 return False
140
141 if ':' in query:
142 try:
143 socket.inet_pton(socket.AF_INET6, query)
144 except (OSError, socket.error): # Python 2/3 compatibility
145 return bot.say("[IP/Host Lookup] Unable to resolve IP/Hostname")
146 elif '.' in query:
147 try:
148 socket.inet_pton(socket.AF_INET, query)
149 except (socket.error, socket.herror):
150 try:
151 query = socket.getaddrinfo(query, None)[0][4][0]
152 except socket.gaierror:
153 return bot.say("[IP/Host Lookup] Unable to resolve IP/Hostname")
154 else:
155 return bot.say("[IP/Host Lookup] Unable to resolve IP/Hostname")
156
157 city = geoip2.database.Reader(os.path.join(db_path, 'GeoLite2-City.mmdb'))
158 asn = geoip2.database.Reader(os.path.join(db_path, 'GeoLite2-ASN.mmdb'))
159 host = socket.getfqdn(query)
160 try:
161 city_response = city.city(query)
162 asn_response = asn.asn(query)
163 except geoip2.errors.AddressNotFoundError:
164 return bot.say("[IP/Host Lookup] The address is not in the database.")
165
166 response = "[IP/Host Lookup] Hostname: %s" % host
167 try:
168 response += " | Location: %s" % city_response.country.name
169 except AttributeError:
170 response += ' | Location: Unknown'
171
172 region = city_response.subdivisions.most_specific.name
173 response += " | Region: %s" % region if region else ""
174 city = city_response.city.name
175 response += " | City: %s" % city if city else ""
176 isp = "AS" + str(asn_response.autonomous_system_number) + \
177 " " + asn_response.autonomous_system_organization
178 response += " | ISP: %s" % isp if isp else ""
179 bot.say(response)
180
181
182 if __name__ == "__main__":
183 from sopel.test_tools import run_example_tests
184 run_example_tests(__file__)
185
[end of sopel/modules/ip.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sopel/modules/ip.py b/sopel/modules/ip.py
--- a/sopel/modules/ip.py
+++ b/sopel/modules/ip.py
@@ -19,6 +19,7 @@
from sopel.config.types import FilenameAttribute, StaticSection
from sopel.module import commands, example
+from sopel.tools import web
urlretrieve = None
try:
@@ -89,10 +90,19 @@
elif urlretrieve:
LOGGER.info('Downloading GeoIP database')
bot.say('Downloading GeoIP database, please wait...')
- geolite_urls = [
- 'https://geolite.maxmind.com/download/geoip/database/GeoLite2-City.tar.gz',
- 'https://geolite.maxmind.com/download/geoip/database/GeoLite2-ASN.tar.gz'
- ]
+
+ common_params = {'license_key': 'JXBEmLjOzislFnh4', 'suffix': 'tar.gz'}
+ base_url = 'https://download.maxmind.com/app/geoip_download'
+ geolite_urls = []
+
+ for edition in ['ASN', 'City']:
+ geolite_urls.append(
+ '{base}?{params}'.format(
+ base=base_url,
+ params=web.urlencode(dict(common_params, **{'edition_id': 'GeoLite2-%s' % edition})),
+ )
+ )
+
for url in geolite_urls:
LOGGER.debug('GeoIP Source URL: %s', url)
full_path = os.path.join(config.core.homedir, url.split("/")[-1])
|
{"golden_diff": "diff --git a/sopel/modules/ip.py b/sopel/modules/ip.py\n--- a/sopel/modules/ip.py\n+++ b/sopel/modules/ip.py\n@@ -19,6 +19,7 @@\n \n from sopel.config.types import FilenameAttribute, StaticSection\n from sopel.module import commands, example\n+from sopel.tools import web\n \n urlretrieve = None\n try:\n@@ -89,10 +90,19 @@\n elif urlretrieve:\n LOGGER.info('Downloading GeoIP database')\n bot.say('Downloading GeoIP database, please wait...')\n- geolite_urls = [\n- 'https://geolite.maxmind.com/download/geoip/database/GeoLite2-City.tar.gz',\n- 'https://geolite.maxmind.com/download/geoip/database/GeoLite2-ASN.tar.gz'\n- ]\n+\n+ common_params = {'license_key': 'JXBEmLjOzislFnh4', 'suffix': 'tar.gz'}\n+ base_url = 'https://download.maxmind.com/app/geoip_download'\n+ geolite_urls = []\n+\n+ for edition in ['ASN', 'City']:\n+ geolite_urls.append(\n+ '{base}?{params}'.format(\n+ base=base_url,\n+ params=web.urlencode(dict(common_params, **{'edition_id': 'GeoLite2-%s' % edition})),\n+ )\n+ )\n+\n for url in geolite_urls:\n LOGGER.debug('GeoIP Source URL: %s', url)\n full_path = os.path.join(config.core.homedir, url.split(\"/\")[-1])\n", "issue": "ip: MaxMind GeoLite2 Changes\n### Description\r\nMaxMind appears to now require an account to download the GeoLite2 databases, which Sopel currently uses. The geolite.maxmind.com domain, where Sopel downloads the databases, no longer exists, which is causing tests to fail.\r\n\r\n### Reproduction steps\r\n1. Clone master\r\n2. Run `./pytest_run.py -v sopel/modules/ip.py`\r\n3. Observe `sopel/modules/ip.py::test_example_ip_0 FAILED`\r\n\r\n### Expected behavior\r\nTest passes\r\n\r\n### Notes\r\nThere seems to be an API with a [python library](https://pypi.org/project/geoip2/), but that also requires an account.\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nip.py - Sopel GeoIP Lookup Module\nCopyright 2011, Dimitri Molenaars, TyRope.nl,\nCopyright \u00a9 2013, Elad Alfassa <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\n\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport logging\nimport os\nimport socket\nimport tarfile\n\nimport geoip2.database\n\nfrom sopel.config.types import FilenameAttribute, StaticSection\nfrom sopel.module import commands, example\n\nurlretrieve = None\ntry:\n from urllib import urlretrieve\nexcept ImportError:\n try:\n # urlretrieve has been put under urllib.request in Python 3.\n # It's also deprecated so this should probably be replaced with\n # urllib2.\n from urllib.request import urlretrieve\n except ImportError:\n pass\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass GeoipSection(StaticSection):\n GeoIP_db_path = FilenameAttribute('GeoIP_db_path', directory=True)\n \"\"\"Path of the directory containing the GeoIP database files.\"\"\"\n\n\ndef configure(config):\n \"\"\"\n | name | example | purpose |\n | ---- | ------- | ------- |\n | GeoIP\\\\_db\\\\_path | /home/sopel/GeoIP/ | Path to the GeoIP database files |\n \"\"\"\n config.define_section('ip', GeoipSection)\n config.ip.configure_setting('GeoIP_db_path',\n 'Path of the GeoIP db files')\n\n\ndef setup(bot):\n bot.config.define_section('ip', GeoipSection)\n\n\ndef _decompress(source, target, delete_after_decompression=True):\n \"\"\"Decompress just the database from the archive\"\"\"\n # https://stackoverflow.com/a/16452962\n tar = tarfile.open(source)\n for member in tar.getmembers():\n if \".mmdb\" in member.name:\n member.name = 
os.path.basename(member.name)\n tar.extract(member, target)\n if delete_after_decompression:\n os.remove(source)\n\n\ndef _find_geoip_db(bot):\n \"\"\"Find the GeoIP database\"\"\"\n config = bot.config\n if config.ip.GeoIP_db_path:\n cities_db = os.path.join(config.ip.GeoIP_db_path, 'GeoLite2-City.mmdb')\n ipasnum_db = os.path.join(config.ip.GeoIP_db_path, 'GeoLite2-ASN.mmdb')\n if (os.path.isfile(cities_db) and os.path.isfile(ipasnum_db)):\n return config.ip.GeoIP_db_path\n else:\n LOGGER.warning(\n 'GeoIP path configured but DB not found in configured path')\n\n if (os.path.isfile(os.path.join(config.core.homedir, 'GeoLite2-City.mmdb')) and\n os.path.isfile(os.path.join(config.core.homedir, 'GeoLite2-ASN.mmdb'))):\n return config.core.homedir\n elif (os.path.isfile(os.path.join('/usr/share/GeoIP', 'GeoLite2-City.mmdb')) and\n os.path.isfile(os.path.join('/usr/share/GeoIP', 'GeoLite2-ASN.mmdb'))):\n return '/usr/share/GeoIP'\n elif urlretrieve:\n LOGGER.info('Downloading GeoIP database')\n bot.say('Downloading GeoIP database, please wait...')\n geolite_urls = [\n 'https://geolite.maxmind.com/download/geoip/database/GeoLite2-City.tar.gz',\n 'https://geolite.maxmind.com/download/geoip/database/GeoLite2-ASN.tar.gz'\n ]\n for url in geolite_urls:\n LOGGER.debug('GeoIP Source URL: %s', url)\n full_path = os.path.join(config.core.homedir, url.split(\"/\")[-1])\n urlretrieve(url, full_path)\n _decompress(full_path, config.core.homedir)\n return bot.config.core.homedir\n else:\n return False\n\n\n@commands('iplookup', 'ip')\n@example('.ip 8.8.8.8',\n r'\\[IP\\/Host Lookup\\] Hostname: \\S*dns\\S*\\.google\\S* \\| Location: United States \\| ISP: AS15169 Google LLC',\n re=True,\n ignore='Downloading GeoIP database, please wait...',\n online=True)\ndef ip(bot, trigger):\n \"\"\"IP Lookup tool\"\"\"\n # Check if there is input at all\n if not trigger.group(2):\n return bot.reply(\"No search term.\")\n # Check whether the input is an IP or hostmask or a nickname\n decide = ['.', ':']\n if any(x in trigger.group(2) for x in decide):\n # It's an IP/hostname!\n query = trigger.group(2).strip()\n else:\n # Need to get the host for the username\n username = trigger.group(2).strip()\n user_in_botdb = bot.users.get(username)\n if user_in_botdb is not None:\n query = user_in_botdb.host\n\n # Sanity check - sometimes user information isn't populated yet\n if query is None:\n return bot.say(\"I don't know that user's host.\")\n else:\n return bot.say(\"I\\'m not aware of this user.\")\n\n db_path = _find_geoip_db(bot)\n if db_path is False:\n LOGGER.error('Can\\'t find (or download) usable GeoIP database.')\n bot.say('Sorry, I don\\'t have a GeoIP database to use for this lookup.')\n return False\n\n if ':' in query:\n try:\n socket.inet_pton(socket.AF_INET6, query)\n except (OSError, socket.error): # Python 2/3 compatibility\n return bot.say(\"[IP/Host Lookup] Unable to resolve IP/Hostname\")\n elif '.' 
in query:\n try:\n socket.inet_pton(socket.AF_INET, query)\n except (socket.error, socket.herror):\n try:\n query = socket.getaddrinfo(query, None)[0][4][0]\n except socket.gaierror:\n return bot.say(\"[IP/Host Lookup] Unable to resolve IP/Hostname\")\n else:\n return bot.say(\"[IP/Host Lookup] Unable to resolve IP/Hostname\")\n\n city = geoip2.database.Reader(os.path.join(db_path, 'GeoLite2-City.mmdb'))\n asn = geoip2.database.Reader(os.path.join(db_path, 'GeoLite2-ASN.mmdb'))\n host = socket.getfqdn(query)\n try:\n city_response = city.city(query)\n asn_response = asn.asn(query)\n except geoip2.errors.AddressNotFoundError:\n return bot.say(\"[IP/Host Lookup] The address is not in the database.\")\n\n response = \"[IP/Host Lookup] Hostname: %s\" % host\n try:\n response += \" | Location: %s\" % city_response.country.name\n except AttributeError:\n response += ' | Location: Unknown'\n\n region = city_response.subdivisions.most_specific.name\n response += \" | Region: %s\" % region if region else \"\"\n city = city_response.city.name\n response += \" | City: %s\" % city if city else \"\"\n isp = \"AS\" + str(asn_response.autonomous_system_number) + \\\n \" \" + asn_response.autonomous_system_organization\n response += \" | ISP: %s\" % isp if isp else \"\"\n bot.say(response)\n\n\nif __name__ == \"__main__\":\n from sopel.test_tools import run_example_tests\n run_example_tests(__file__)\n", "path": "sopel/modules/ip.py"}]}
| 2,792 | 355 |
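For context on the golden diff in this record: it replaces the retired geolite.maxmind.com download links with MaxMind's authenticated endpoint, building each URL from a base path plus URL-encoded `edition_id`, `license_key`, and `suffix` parameters (via Sopel's `web.urlencode` helper). A standalone sketch of the same URL construction, using only the Python standard library and a placeholder license key (an assumption — a real key has to come from a MaxMind account), might look like:

```python
# Minimal sketch (not Sopel's actual helper) of building the authenticated
# GeoLite2 download URLs; the license_key value is a placeholder, not a real
# credential.
try:
    from urllib.parse import urlencode  # Python 3
except ImportError:
    from urllib import urlencode  # Python 2 fallback

base_url = 'https://download.maxmind.com/app/geoip_download'
common_params = {'license_key': '<YOUR_LICENSE_KEY>', 'suffix': 'tar.gz'}

geolite_urls = []
for edition in ('ASN', 'City'):
    # Merge the shared parameters with the per-edition database name.
    params = dict(common_params, edition_id='GeoLite2-%s' % edition)
    geolite_urls.append('%s?%s' % (base_url, urlencode(params)))

for url in geolite_urls:
    print(url)
```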
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.